code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video link for an Instagram/IGTV page and return the raw bytes.

    Fix: the obfuscated definition was named ``UpperCAmelCase_`` with parameter
    ``_A`` while the body read undefined names ``url``/``lowercase__`` and the
    ``__main__`` caller invokes ``download_video(url)`` — names are restored.

    :param url: public Instagram Video/IGTV page URL
    :return: the raw bytes of the video file
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # The resolver API returns a JSON list; the first entry's first "urls"
    # item carries the direct video source.
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content
if __name__ == "__main__":
    # Fix: the obfuscated script bound the input and filename to
    # ``_SCREAMING_SNAKE_CASE`` but then read ``url``/``file_name``.
    url = input("Enter Video/IGTV url: ").strip()
    # Use '-' instead of ':' in the timestamp: colons are not legal in
    # Windows file names, so the original pattern failed there.
    file_name = f"{datetime.now():%Y-%m-%d_%H-%M-%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
| 493 |
def odd_even_transposition(arr: list) -> list:
    """Sort ``arr`` in place with odd-even transposition (brick) sort and return it.

    Fix: the obfuscated swap assigned to two throwaway locals
    (``lowerCAmelCase__ , lowerCAmelCase__ = arr[i + 1], arr[i]``) so the list
    was never modified; the real element swap is restored, as is the
    function/parameter naming used by the ``__main__`` caller.

    :param arr: list of mutually comparable items; sorted in place
    :return: the same list, sorted ascending
    """
    arr_size = len(arr)
    for pass_number in range(arr_size):
        # Even-numbered passes compare pairs (0,1),(2,3),...; odd-numbered
        # passes compare (1,2),(3,4),... — together they bubble items home.
        for i in range(pass_number % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Fix: the obfuscated script assigned the list to ``_UpperCAmelCase``
    # but the print statement reads ``arr``.
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 668 | 0 |
from collections import namedtuple

# Pair of conversion factors relative to the base unit (cubic metre):
# ``from_`` converts the unit into cubic metres, ``to`` converts cubic
# metres into the unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def metric_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume ``value`` from ``from_type`` units into ``to_type`` units.

    Fix: the obfuscated code bound the namedtuple and the table to the wrong
    names (the body reads ``from_to``/``METRIC_CONVERSION``) and declared the
    function with three parameters all named ``a__`` — a SyntaxError — while
    the body read ``value``/``from_type``/``to_type``.

    :param value: quantity expressed in ``from_type`` units
    :param from_type: source unit, a key of ``METRIC_CONVERSION``
    :param to_type: target unit, a key of ``METRIC_CONVERSION``
    :return: the quantity expressed in ``to_type`` units
    :raises ValueError: if either unit name is not supported
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Route through the base unit: value -> cubic metres -> target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest

    doctest.testmod()
| 17 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class lowerCAmelCase_ ( snake_case__ ):
    """Model-tester helper: builds DistilBert configs/inputs and asserts output shapes.

    NOTE(review): this block is machine-obfuscated and cannot run as written —
    ``__init__`` repeats the parameter name ``SCREAMING_SNAKE_CASE_`` (a
    SyntaxError), every ``lowerCAmelCase__ = ...`` line rebinds one local name
    instead of assigning a distinct attribute/variable, and all methods share
    the name ``__snake_case`` so later defs shadow earlier ones. It needs
    de-obfuscation; only documentation is added here.
    """

    def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=13 , SCREAMING_SNAKE_CASE_ : Dict=7 , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Optional[int]=False , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : str=99 , SCREAMING_SNAKE_CASE_ : str=32 , SCREAMING_SNAKE_CASE_ : int=5 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Tuple=37 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : List[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Any=16 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=4 , SCREAMING_SNAKE_CASE_ : int=None , ):
        # Store tester configuration (batch size, model dimensions, flags).
        # NOTE(review): these reads (``parent`` etc.) are unbound names in the
        # obfuscated form; presumably they were the original parameter names.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = hidden_size
        lowerCAmelCase__ = num_hidden_layers
        lowerCAmelCase__ = num_attention_heads
        lowerCAmelCase__ = intermediate_size
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout_prob
        lowerCAmelCase__ = attention_probs_dropout_prob
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = type_sequence_label_size
        lowerCAmelCase__ = initializer_range
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope

    def __snake_case ( self : Union[str, Any] ):
        """Build random input ids, optional attention mask and labels plus a config."""
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def __snake_case ( self : Tuple ):
        """Return a DistilBertConfig built from the tester's stored dimensions."""
        return DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )

    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        """Check the bare DistilBertModel's last_hidden_state shape."""
        lowerCAmelCase__ = DistilBertModel(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        """Check the masked-LM head's logits shape (vocab-sized per token)."""
        lowerCAmelCase__ = DistilBertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple ):
        """Check the QA head's start/end logits shapes (one score per token)."""
        lowerCAmelCase__ = DistilBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int ):
        """Check the sequence-classification head's logits shape."""
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = DistilBertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
        """Check the token-classification head's logits shape (labels per token)."""
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = DistilBertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
        """Check the multiple-choice head: inputs are tiled per choice."""
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = DistilBertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        model.to(SCREAMING_SNAKE_CASE_ )
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        lowerCAmelCase__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        lowerCAmelCase__ = model(
            SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def __snake_case ( self : Optional[int] ):
        """Unpack prepared inputs into the (config, inputs_dict) pair common tests use."""
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        ((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """Common-suite test class wiring the tester above into the shared model tests.

    NOTE(review): obfuscated — the class attributes are all bound to
    ``UpperCamelCase_`` (each clobbering the previous), methods all share the
    name ``__snake_case``, and test bodies pass the unbound name
    ``SCREAMING_SNAKE_CASE_`` around; needs de-obfuscation to run.
    """

    # Model classes exercised by the common tests (None when torch is absent).
    UpperCamelCase_ :Any = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    UpperCamelCase_ :Union[str, Any] = (
        {
            'feature-extraction': DistilBertModel,
            'fill-mask': DistilBertForMaskedLM,
            'question-answering': DistilBertForQuestionAnswering,
            'text-classification': DistilBertForSequenceClassification,
            'token-classification': DistilBertForTokenClassification,
            'zero-shot': DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by the common test mixin.
    UpperCamelCase_ :int = True
    UpperCamelCase_ :List[str] = True
    UpperCamelCase_ :List[Any] = True
    UpperCamelCase_ :Dict = True

    def __snake_case ( self : Dict ):
        """Set up the model tester and a ConfigTester for DistilBertConfig."""
        lowerCAmelCase__ = DistilBertModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , dim=37 )

    def __snake_case ( self : List[Any] ):
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def __snake_case ( self : Dict ):
        """Shape-check the bare model."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        """Shape-check the masked-LM head."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Dict ):
        """Shape-check the question-answering head."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        """Shape-check the sequence-classification head."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        """Shape-check the token-classification head."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        """Shape-check the multiple-choice head."""
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )

    @slow
    def __snake_case ( self : Tuple ):
        """Smoke-test loading the first published checkpoint."""
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ = DistilBertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
            self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )

    @slow
    @require_torch_gpu
    def __snake_case ( self : Any ):
        """Trace each model with torch.jit, round-trip it through disk, and re-run it."""
        lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return
            lowerCAmelCase__ = True
            lowerCAmelCase__ = model_class(config=SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = torch.jit.trace(
                SCREAMING_SNAKE_CASE_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) )
                lowerCAmelCase__ = torch.jit.load(os.path.join(SCREAMING_SNAKE_CASE_ , '''traced_model.pt''' ) , map_location=SCREAMING_SNAKE_CASE_ )
                loaded(inputs_dict['''input_ids'''].to(SCREAMING_SNAKE_CASE_ ) , inputs_dict['''attention_mask'''].to(SCREAMING_SNAKE_CASE_ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Integration test: run the published checkpoint and compare a logits slice.

    NOTE(review): obfuscated — locals all rebind ``lowerCAmelCase__`` and the
    assertions read the unbound name ``SCREAMING_SNAKE_CASE_``; needs
    de-obfuscation to run.
    """

    @slow
    def __snake_case ( self : str ):
        lowerCAmelCase__ = DistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowerCAmelCase__ = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
        lowerCAmelCase__ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        # Forward pass without gradients; [0] selects last_hidden_state.
        with torch.no_grad():
            lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
        lowerCAmelCase__ = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
        # Golden values for a 3x3 slice of the hidden states.
        lowerCAmelCase__ = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
| 668 | 0 |
'''simple docstring'''
import string
from math import log10
from math import log10 as logaa  # the math module has no ``logaa``; alias the obfuscated name
def term_frequency(term: str, document: str) -> int:
    """Return how many times ``term`` occurs in ``document`` (case-insensitive).

    Fix: the obfuscated definition declared two parameters both named
    ``lowercase_`` (a SyntaxError) while the body read ``term``/``document``,
    and intermediate results were all rebound to one local name.

    :param term: word to count (compared case-insensitively)
    :param document: text to search; punctuation and newlines are stripped
    :return: number of whole-word, case-insensitive matches
    """
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])
def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return ``(docs containing term, total docs)`` for a newline-separated corpus.

    Fix: the obfuscated definition declared duplicate ``lowercase_`` parameters
    (a SyntaxError) while the body read ``term``/``corpus``/``docs``.

    :param term: word to look for (case-insensitive substring match)
    :param corpus: documents separated by newlines
    :return: (number of documents containing ``term``, total number of documents)
    """
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))
def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return the inverse document frequency, rounded to 3 decimal places.

    Fix: the obfuscated definition declared duplicate ``lowercase_`` parameters
    (a SyntaxError) while the body read ``df``/``n``/``smoothing``; the module
    import ``from math import logaa`` is also broken (``math.log10`` intended).

    :param df: document frequency — number of documents containing the term
    :param n: total number of documents in the corpus
    :param smoothing: if True, return the smoothed variant ``1 + log10(n / (1 + df))``
    :return: idf value rounded to 3 decimal places
    :raises ZeroDivisionError: if ``df == 0`` without smoothing
    :raises ValueError: if ``n == 0`` (log10(0) is undefined)
    """
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        # Smoothed idf avoids division by zero when df == 0.
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)
def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency into a tf-idf score.

    Fix: the obfuscated definition declared duplicate ``lowercase_`` parameters
    (a SyntaxError) while the body read ``tf``/``idf``.

    :param tf: term frequency for the term in a document
    :param idf: inverse document frequency of the term in the corpus
    :return: ``tf * idf`` rounded to 3 decimal places
    """
    return round(tf * idf, 3)
| 72 |
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most probable hidden-state sequence for the observations.

    Standard Viterbi dynamic programming: ``probabilities[(state, obs)]``
    stores the best path probability ending in ``state`` at ``obs`` and
    ``pointers[(state, obs)]`` the predecessor state, so the optimal path
    can be retraced backwards at the end.

    Fix: in the obfuscated form every dictionary write (``probabilities[...] = ...``,
    ``pointers[...] = ...``) and every loop variable had been collapsed into
    rebindings of one throwaway local, so the tables were never populated.

    :param observations_space: ordered list of observed symbols
    :param states_space: list of hidden-state names
    :param initial_probabilities: state -> P(state at t=0)
    :param transition_probabilities: state -> {state -> P(transition)}
    :param emission_probabilities: state -> {observation -> P(emission)}
    :return: most likely state sequence, one state per observation
    :raises ValueError: if any argument is empty or badly typed
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None
    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max
    # The final observation
    final_observation = observations_space[len(observations_space) - 1]
    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max
    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result
def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Run all input checks for :func:`viterbi` (non-empty, list and dict shapes).

    Fix: the obfuscated definition declared five parameters all named
    ``lowercase__`` (a SyntaxError) and forwarded that same name everywhere.

    :raises ValueError: if any argument is empty or of the wrong type
    """
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , lowercase__ : Any , ) -> None:
'''simple docstring'''
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError('''There\'s an empty parameter''' )
def _validate_lists(observations_space: Any, states_space: Any) -> None:
    """Check that both spaces are lists of strings.

    Fix: the obfuscated definition declared duplicate ``lowercase__``
    parameters (a SyntaxError).

    :raises ValueError: if either argument is not a list of strings
    """
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str ) -> None:
'''simple docstring'''
if not isinstance(_object , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a list'
raise ValueError(lowercase__ )
else:
for x in _object:
if not isinstance(lowercase__ , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a list of strings'
raise ValueError(lowercase__ )
def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    """Check the three probability tables: a flat float dict and two nested dicts.

    Fix: the obfuscated definition declared duplicate ``lowercase__``
    parameters (a SyntaxError).

    :raises ValueError: if any table has the wrong shape or value types
    """
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")
def _validate_nested_dict(_object: Any, var_name: str) -> None:
    """Check that ``_object`` is a dict of dicts whose inner values are floats.

    Fix: the obfuscated body forwarded the undefined name ``lowercase__``
    instead of ``var_name``, ``dict`` and ``float``.

    :param _object: value to validate
    :param var_name: name used in error messages
    :raises ValueError: if the outer or inner dictionaries are malformed
    """
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : str , lowercase__ : type , lowercase__ : bool = False ) -> None:
'''simple docstring'''
if not isinstance(_object , lowercase__ ):
lowerCAmelCase__ = f'{var_name} must be a dict'
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object ):
lowerCAmelCase__ = f'{var_name} all keys must be strings'
raise ValueError(lowercase__ )
if not all(isinstance(lowercase__ , lowercase__ ) for x in _object.values() ):
lowerCAmelCase__ = '''nested dictionary ''' if nested else ''''''
lowerCAmelCase__ = f'{var_name} {nested_text}all values must be {value_type.__name__}'
raise ValueError(lowercase__ )
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    from doctest import testmod

    testmod()
| 668 | 0 |
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
# Fix: all four module constants were bound to the throwaway name ``a__``,
# so the names the classes below read (HEURISTIC, grid, delta, TPos) were
# never defined.

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

# Type alias for a board position, stored as (y, x).
TPos = tuple[int, int]
class Node:
    """A* search node: position, goal, path cost g, heuristic h and total f = g + h.

    Fix: the obfuscated class was named ``SCREAMING_SNAKE_CASE__`` while the
    search classes instantiate ``Node``; every attribute assignment was
    collapsed into rebindings of one local ``A__`` (so no state was stored),
    and the Manhattan branch returned ``abs()`` of an undefined name instead
    of the dx/dy deltas.
    """

    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)  # positions are handled as (y, x) tuples
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance-to-goal estimate: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other):
        # Ordering by f-cost lets open lists be sorted cheapest-first.
        return self.f_cost < other.f_cost
class AStar:
    """Plain A* search over the module-level ``grid`` using ``Node`` costs.

    Fix: the obfuscated class shadowed ``Node``'s name, stored no attributes
    (everything rebound ``A__``), and the open-list improvement branch pushed
    an undefined name in both arms; state and logic are restored.
    """

    def __init__(self, start, goal):
        # ``start``/``goal`` are (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self):
        """Expand cheapest open nodes until the target is reached.

        :return: list of (y, x) positions from start to target, or
                 ``[self.start.pos]`` when no path exists.
        """
        while self.open_nodes:
            # Open Nodes are sorted using __lt__ (cheapest f-cost first).
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent):
        """Return walkable neighbour Nodes of ``parent`` (bounds- and obstacle-checked)."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Walk parent links back from ``node`` and return the start-to-node path."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Bidirectional A*: two AStar searches growing toward each other.

    Fix: the obfuscated class shadowed the other two classes' name and
    rebound every attribute/local to ``A__``; the forward/backward searches,
    moving targets and open-list bookkeeping are restored.
    """

    def __init__(self, start, goal):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self):
        """Alternate forward/backward expansions until the frontiers meet.

        :return: joined (y, x) path from start to goal, or the start position
                 alone when no path exists.
        """
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # Each search aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node)
                        )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node, bwd_node):
        """Join the forward path and the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    # Fix: the obfuscated script bound everything to ``a__`` while reading
    # ``init``/``goal``/``start_time``/``bd_start_time``, and the
    # bidirectional timing never actually ran a search.
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()  # restored so the timing measures a search
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
| 190 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase_ ( snake_case__ ):
    """Audio feature extractor: waveforms -> padded log-mel spectrogram patches
    (``audio_values``) plus an ``audio_mask``.

    NOTE(review): obfuscated and not runnable as written — ``__init__``
    repeats the parameter name ``SCREAMING_SNAKE_CASE_`` (a SyntaxError, and
    one default is the mutable list ``[16, 16]``), and every
    ``lowerCAmelCase__ = ...`` line rebinds one local instead of setting the
    attribute the later code reads; needs de-obfuscation.
    """

    # Names of the model input tensors this extractor produces.
    UpperCamelCase_ :Union[str, Any] = ['audio_values', 'audio_mask']

    def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=2_048 , SCREAMING_SNAKE_CASE_ : Dict=1 , SCREAMING_SNAKE_CASE_ : Dict=[16, 16] , SCREAMING_SNAKE_CASE_ : Tuple=128 , SCREAMING_SNAKE_CASE_ : Optional[Any]=44_100 , SCREAMING_SNAKE_CASE_ : Optional[int]=86 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_048 , SCREAMING_SNAKE_CASE_ : List[Any]=0.0 , **SCREAMING_SNAKE_CASE_ : int , ):
        super().__init__(
            feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        # Spectrogram/patch geometry and sampling configuration.
        lowerCAmelCase__ = spectrogram_length
        lowerCAmelCase__ = num_channels
        lowerCAmelCase__ = patch_size
        lowerCAmelCase__ = feature_size // self.patch_size[1]
        lowerCAmelCase__ = n_fft
        lowerCAmelCase__ = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase__ = sampling_rate
        lowerCAmelCase__ = padding_value
        # Precompute the mel filter bank (transposed for the spectrogram call).
        lowerCAmelCase__ = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=SCREAMING_SNAKE_CASE_ , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=SCREAMING_SNAKE_CASE_ , norm='''slaney''' , mel_scale='''slaney''' , ).T

    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : np.array ):
        """Compute a log-mel spectrogram (dB), rescaled into roughly [-1, 1]."""
        lowerCAmelCase__ = spectrogram(
            SCREAMING_SNAKE_CASE_ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=80.0 , )
        lowerCAmelCase__ = log_spec[:, :-1]
        lowerCAmelCase__ = log_spec - 20.0
        lowerCAmelCase__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , SCREAMING_SNAKE_CASE_ : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE_ : Optional[bool] = True , SCREAMING_SNAKE_CASE_ : Optional[int] = None , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        """Batch, featurize, pad and mask raw speech into a BatchFeature."""
        # Guard against a mismatched sampling rate; warn when none is given.
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''' )
        # Normalize the input into a list of float32 column waveforms.
        lowerCAmelCase__ = isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
        lowerCAmelCase__ = is_batched_numpy or (
            isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
            lowerCAmelCase__ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
        elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase__ = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase__ = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase__ = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , SCREAMING_SNAKE_CASE_ ):
            lowerCAmelCase__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase__ = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            lowerCAmelCase__ = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase__ = np.array(SCREAMING_SNAKE_CASE_ ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase__ = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        lowerCAmelCase__ = np.ones([len(SCREAMING_SNAKE_CASE_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase__ = padded_audio_features * self.padding_value
        for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
            lowerCAmelCase__ = audio_features[i]
            lowerCAmelCase__ = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase__ = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            lowerCAmelCase__ = {'''audio_values''': padded_audio_features}
        lowerCAmelCase__ = BatchFeature(data=SCREAMING_SNAKE_CASE_ , tensor_type=SCREAMING_SNAKE_CASE_ )
        return encoded_inputs
| 668 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: five distinct module constants were all bound to ``_SCREAMING_SNAKE_CASE``,
# each clobbering the previous; the names are restored to the ones the
# tokenizer class below actually references (VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES) plus
# the module logger and the SentencePiece word-boundary marker.

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 5_12,
}

# SentencePiece prefix marking the start of a word ("▁", U+2581).
SPIECE_UNDERLINE = "▁"
class __UpperCAmelCase ( snake_case__ ):
    """
    SentencePiece-based tokenizer in the CamemBERT style: a small
    fairseq-compatible special-token table is laid over a SentencePiece
    vocabulary, with SentencePiece ids shifted by a fixed offset.

    NOTE(review): identifiers in this block appear machine-mangled — the base
    class ``snake_case__``, the class constants, the duplicated ``_lowercase``
    parameter names, and the ``SCREAMING_SNAKE_CASE_`` placeholders inside the
    method bodies do not resolve as written; confirm the intended names against
    the upstream source before relying on this code.
    """

    # Class-level tokenizer metadata expected by the base tokenizer machinery.
    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = ['input_ids', 'attention_mask']

    # Loads the SentencePiece model and registers the fairseq-style special tokens.
    def __init__( self : Dict , _lowercase : int , _lowercase : Any="<s>" , _lowercase : Tuple="</s>" , _lowercase : Optional[Any]="</s>" , _lowercase : Optional[int]="<s>" , _lowercase : List[Any]="<unk>" , _lowercase : Optional[Any]="<pad>" , _lowercase : str="<mask>" , _lowercase : int=["<s>NOTUSED", "</s>NOTUSED"] , _lowercase : Optional[Dict[str, Any]] = None , **_lowercase : str , ) -> int:
        # Mask token behaves like a normal word, i.e. it includes the space before it.
        A_ = AddedToken(SCREAMING_SNAKE_CASE_ , lstrip=SCREAMING_SNAKE_CASE_ , rstrip=SCREAMING_SNAKE_CASE_) if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_) else mask_token
        A_ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , additional_special_tokens=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
        A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(SCREAMING_SNAKE_CASE_))
        A_ = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>).
        A_ = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
        A_ = len(self.fairseq_tokens_to_ids)
        A_ = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        A_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    # Builds model inputs: <s> A </s> for one sequence, <s> A </s></s> B </s> for pairs.
    def __snake_case ( self : Any , _lowercase : List[int] , _lowercase : Optional[List[int]] = None) -> Tuple:
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        A_ = [self.cls_token_id]
        A_ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    # Returns a mask: 1 for special tokens, 0 for sequence tokens.
    def __snake_case ( self : List[Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None , _lowercase : bool = False) -> Optional[Any]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE_ , token_ids_a=SCREAMING_SNAKE_CASE_ , already_has_special_tokens=SCREAMING_SNAKE_CASE_)
        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_)) + [1]

    # Token-type ids are all zeros for this tokenizer (no segment distinction).
    def __snake_case ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None) -> str:
        A_ = [self.sep_token_id]
        A_ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def __snake_case ( self : List[Any]) -> int:
        # Total vocabulary size: fairseq specials plus SentencePiece pieces.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def __snake_case ( self : int) -> Tuple:
        # Full token -> id mapping, including tokens added after loading.
        A_ = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __snake_case ( self : Tuple , _lowercase : str) -> Tuple:
        # Tokenize text with the SentencePiece model.
        return self.sp_model.encode(SCREAMING_SNAKE_CASE_ , out_type=SCREAMING_SNAKE_CASE_)

    def __snake_case ( self : Union[str, Any] , _lowercase : List[Any]) -> Optional[int]:
        # Token -> id, preferring the fairseq special-token table.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(SCREAMING_SNAKE_CASE_)

    def __snake_case ( self : Dict , _lowercase : Dict) -> Optional[Any]:
        # Id -> token, undoing the fairseq offset for SentencePiece ids.
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def __snake_case ( self : int , _lowercase : Optional[int]) -> Dict:
        # Detokenize, decoding special tokens outside of the SentencePiece model.
        A_ = []
        A_ = ''
        A_ = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_) + token
                A_ = True
                A_ = []
            else:
                current_sub_tokens.append(SCREAMING_SNAKE_CASE_)
                A_ = False
        out_string += self.sp_model.decode(SCREAMING_SNAKE_CASE_)
        return out_string.strip()

    def __getstate__( self : Optional[Any]) -> Tuple:
        # Drop the unpicklable SentencePiece processor from the pickled state.
        A_ = self.__dict__.copy()
        A_ = None
        return state

    def __setstate__( self : str , _lowercase : List[Any]) -> List[str]:
        # Rebuild the SentencePiece processor after unpickling.
        A_ = d
        # for backward compatibility
        if not hasattr(self , 'sp_model_kwargs'):
            A_ = {}
        A_ = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def __snake_case ( self : Tuple , _lowercase : str , _lowercase : Optional[str] = None) -> Union[str, Any]:
        # Copy (or serialize) the SentencePiece model file into `save_directory`.
        if not os.path.isdir(SCREAMING_SNAKE_CASE_):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory')
            return
        A_ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE_) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file , SCREAMING_SNAKE_CASE_)
        elif not os.path.isfile(self.vocab_file):
            with open(SCREAMING_SNAKE_CASE_ , 'wb') as fi:
                A_ = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE_)
        return (out_vocab_file,)
# ----------------------------------------------------------------------
from collections import namedtuple

# For each unit: `from_` converts the unit to cubic meters, `to` converts
# cubic meters back to the unit.
from_to = namedtuple("from_to", "from_ to")

# Bug fix: this table was previously assigned to a throwaway mangled name,
# so the lookup below raised NameError at runtime.
METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def volume_conversion(value: float, from_type: str, to_type: str) -> float:
    """Convert a volume ``value`` from ``from_type`` units to ``to_type`` units.

    Supported units are the keys of ``METRIC_CONVERSION``.

    Raises:
        ValueError: if either unit name is not a supported unit.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'from_type' value: {from_type!r} Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f"Invalid 'to_type' value: {to_type!r}. Supported values are:\n"
            + ", ".join(METRIC_CONVERSION)
        )
    # Convert to cubic meters first, then to the target unit.
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


# Backward-compatible alias for the previous (mangled) function name.
lowerCAmelCase_ = volume_conversion

if __name__ == "__main__":
    import doctest

    doctest.testmod()
# ----------------------------------------------------------------------
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (name mangled upstream; both constants share the name `a`,
# so the logger binding is overwritten by the archive map below).
a : int = logging.get_logger(__name__)
# Pretrained config archive map for the released NLLB-MoE checkpoint.
a : Tuple = {
    "facebook/nllb-moe-54B": "https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json",
}
class __UpperCamelCase(PretrainedConfig):
    """
    Configuration class for an NLLB-MoE sequence-to-sequence model.

    Bug fix: as written, every ``__init__`` parameter shared one mangled name
    (a SyntaxError), the class attributes were all assigned to one name, the
    base class did not resolve, and ``super().__init__`` referenced undefined
    names. Parameter names/defaults restored to the canonical NLLB-MoE
    configuration; all defaults are unchanged, so existing positional and
    keyword callers keep working.
    """

    model_type = "nllb-moe"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=12_8112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ) -> None:
        # Transformer backbone hyper-parameters.
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # Mixture-of-experts routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
# ----------------------------------------------------------------------
def binary_insertion_sort(collection: list) -> list:
    """Sort ``collection`` in place with binary insertion sort and return it.

    For each element, a binary search locates its insertion point within the
    already-sorted prefix, then the gap is opened by shifting elements right.

    Bug fix: the shift loop previously iterated ``range`` over the list object
    itself (all names were mangled to one identifier) and the shifted values
    were assigned to a local instead of back into the list, so the function
    crashed and never sorted; the ``__main__`` block also called an undefined
    name.
    """
    length = len(collection)
    for i in range(1, length):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix collection[:i] for the insertion point.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to open a gap at index `low`, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


# Backward-compatible alias for the previous (mangled) function name.
lowerCAmelCase_ = binary_insertion_sort

if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
# ----------------------------------------------------------------------
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger (name mangled upstream; the archive-map assignment below
# reuses the same name and overwrites it).
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# Pretrained config archive map for the released Data2Vec audio checkpoint.
SCREAMING_SNAKE_CASE__ = {
    "facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
    # See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class __lowerCAmelCase ( snake_case__ ):
    """
    Configuration object for a Data2Vec audio model: transformer backbone
    sizes, convolutional feature-extractor layout, SpecAugment masking, CTC,
    adapter, and x-vector head settings.

    NOTE(review): identifiers appear machine-mangled — the base class
    ``snake_case__``, the duplicated ``_snake_case`` parameter names, and the
    undefined names used in the body do not resolve as written; confirm the
    intended names against the upstream source before relying on this code.
    """

    # Model-type key used by the auto-config machinery.
    A__ : Union[str, Any] = 'data2vec-audio'

    def __init__( self : Union[str, Any] , _snake_case : str=32 , _snake_case : Any=7_68 , _snake_case : List[Any]=12 , _snake_case : List[str]=12 , _snake_case : int=30_72 , _snake_case : Dict="gelu" , _snake_case : Dict=0.1 , _snake_case : str=0.1 , _snake_case : Dict=0.1 , _snake_case : Dict=0.0 , _snake_case : Tuple=0.1 , _snake_case : Optional[Any]=0.1 , _snake_case : str=0.02 , _snake_case : Dict=1E-5 , _snake_case : List[Any]="gelu" , _snake_case : Union[str, Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , _snake_case : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , _snake_case : List[Any]=(10, 3, 3, 3, 3, 2, 2) , _snake_case : Optional[int]=False , _snake_case : Optional[Any]=16 , _snake_case : Tuple=19 , _snake_case : str=5 , _snake_case : Optional[int]=0.05 , _snake_case : Optional[Any]=10 , _snake_case : Any=2 , _snake_case : Dict=0.0 , _snake_case : int=10 , _snake_case : Tuple=0 , _snake_case : Dict="sum" , _snake_case : str=False , _snake_case : Optional[Any]=False , _snake_case : Optional[Any]=2_56 , _snake_case : Dict=(5_12, 5_12, 5_12, 5_12, 15_00) , _snake_case : Any=(5, 3, 3, 1, 1) , _snake_case : Tuple=(1, 2, 3, 1, 1) , _snake_case : Optional[int]=5_12 , _snake_case : Optional[Any]=0 , _snake_case : List[str]=1 , _snake_case : Optional[Any]=2 , _snake_case : str=False , _snake_case : Optional[Any]=3 , _snake_case : Any=2 , _snake_case : Union[str, Any]=3 , _snake_case : Optional[int]=None , **_snake_case : Optional[Any] , ):
        """Populate all configuration attributes and validate the conv layout."""
        super().__init__(**SCREAMING_SNAKE_CASE_ , pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ )
        # Transformer backbone and feature-extractor settings.
        A__ = hidden_size
        A__ = feat_extract_activation
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = conv_bias
        A__ = num_conv_pos_embeddings
        A__ = num_conv_pos_embedding_groups
        A__ = conv_pos_kernel_size
        A__ = len(self.conv_dim )
        A__ = num_hidden_layers
        A__ = intermediate_size
        A__ = hidden_act
        A__ = num_attention_heads
        A__ = hidden_dropout
        A__ = attention_dropout
        A__ = activation_dropout
        A__ = feat_proj_dropout
        A__ = final_dropout
        A__ = layerdrop
        A__ = layer_norm_eps
        A__ = initializer_range
        A__ = vocab_size
        A__ = use_weighted_layer_sum
        # The three conv layout lists must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        A__ = mask_time_prob
        A__ = mask_time_length
        A__ = mask_time_min_masks
        A__ = mask_feature_prob
        A__ = mask_feature_length
        A__ = mask_feature_min_masks
        # ctc loss
        A__ = ctc_loss_reduction
        A__ = ctc_zero_infinity
        # adapter
        A__ = add_adapter
        A__ = adapter_kernel_size
        A__ = adapter_stride
        A__ = num_adapter_layers
        A__ = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        A__ = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = list(SCREAMING_SNAKE_CASE_ )
        A__ = xvector_output_dim

    @property
    def _a ( self : Union[str, Any] ):
        """Total stride of the convolutional feature extractor (product of all conv strides)."""
        return math.prod(self.conv_stride )
# ----------------------------------------------------------------------
def match_pattern(input_string: str, pattern: str) -> bool:
    """Return True if ``input_string`` fully matches ``pattern``.

    The pattern supports ``.`` (any single character) and ``*`` (zero or more
    of the preceding element), matched with a bottom-up dynamic program.
    dp[i][j] is 1 iff the first i characters of the string match the first j
    characters of the pattern.

    Bug fix: every write into the DP table had been mangled into an assignment
    to a single local name, so the table was never filled; the parameters were
    also duplicate-named (a SyntaxError) and the ``__main__`` block referenced
    undefined names.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern. "dp" stands for dynamic programming.
    dp = [[0 for _ in range(len_pattern)] for _ in range(len_string)]
    # A zero-length string matches a zero-length pattern.
    dp[0][0] = 1
    # A non-empty pattern matches the empty string only via trailing "x*" pairs.
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # Bottom-up fill for all remaining prefix lengths.
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # "x*" matched zero occurrences.
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # "x*" absorbs one more occurrence of x (or any char for ".").
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])


# Backward-compatible alias for the previous (mangled) function name.
lowerCAmelCase_ = match_pattern

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")
    input_string = "aab"
    pattern = "c*a*b"
    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
# ----------------------------------------------------------------------
"""simple docstring"""
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path,
    question_encoder_name_or_path,
    dest_dir,
    config_name_or_path=None,
    generator_tokenizer_name_or_path=None,
    question_encoder_tokenizer_name_or_path=None,
):
    """Assemble a RAG checkpoint from a generator and a question encoder.

    Saves the consolidated model plus both tokenizers under ``dest_dir``
    (a ``pathlib.Path``).

    Bug fix: the function's parameters were all mangled to one name (a
    SyntaxError) and its body — and the ``__main__`` block's call site —
    referenced the real names, which were undefined as written.
    """
    # Default the config and tokenizer identifiers from the model identifiers.
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check: the saved checkpoint must load back.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    # Command-line entry point: parse checkpoint identifiers and consolidate.
    # NOTE(review): the assignment targets below look machine-mangled — `_A`
    # is assigned but `parser`/`args` are the names actually used afterwards,
    # so this script does not run as written; confirm against upstream.
    _A = argparse.ArgumentParser()
    parser.add_argument(
        '--model_type',
        choices=['rag_sequence', 'rag_token'],
        required=True,
        type=str,
        help='RAG model type: rag_sequence, rag_token',
    )
    parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
    parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
    parser.add_argument(
        '--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
    )
    parser.add_argument(
        '--generator_tokenizer_name_or_path',
        type=str,
        help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
    )
    parser.add_argument(
        '--question_encoder_tokenizer_name_or_path',
        type=str,
        help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
    )
    parser.add_argument(
        '--config_name_or_path',
        type=str,
        help=(
            'Identifier of the model config to use, if not provided, resolves to a base config for a given'
            ' ``model_type``'
        ),
    )
    _A = parser.parse_args()
    _A = Path(args.dest)
    dest_dir.mkdir(exist_ok=True)
    consolidate(
        args.model_type,
        args.generator_name_or_path,
        args.question_encoder_name_or_path,
        dest_dir,
        args.config_name_or_path,
        args.generator_tokenizer_name_or_path,
        args.question_encoder_tokenizer_name_or_path,
    )
# ----------------------------------------------------------------------
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the constants below were all mangled to one name
# `_UpperCAmelCase`, so each assignment overwrites the previous one; upstream
# these are the logger, vocab file names, pretrained vocab map, and sizes.
_UpperCAmelCase : str = logging.get_logger(__name__)
# Name of the character-vocabulary file for MGP-STR.
_UpperCAmelCase : Dict = {"vocab_file": "vocab.json"}
# Download location of the pretrained vocabulary file.
_UpperCAmelCase : Optional[Any] = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}
# Maximum input length of the pretrained checkpoint.
_UpperCAmelCase : Tuple = {"mgp-str": 27}
class lowerCAmelCase_ ( snake_case__ ):
    """
    Character-level tokenizer in the MGP-STR style: each character of the
    input text is a token, looked up in a JSON character vocabulary.

    NOTE(review): identifiers appear machine-mangled — the base class
    ``snake_case__``, the duplicated ``SCREAMING_SNAKE_CASE_`` parameter names,
    and the assignment targets in the bodies do not resolve as written;
    confirm the intended names against the upstream source.
    """

    # Class-level tokenizer metadata expected by the base tokenizer machinery.
    UpperCamelCase_ :Union[str, Any] = VOCAB_FILES_NAMES
    UpperCamelCase_ :Tuple = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    # Loads the JSON character vocabulary and builds the reverse mapping.
    def __init__( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any]="[GO]" , SCREAMING_SNAKE_CASE_ : List[Any]="[GO]" , SCREAMING_SNAKE_CASE_ : Optional[Any]="[s]" , SCREAMING_SNAKE_CASE_ : Any="[GO]" , **SCREAMING_SNAKE_CASE_ : Dict ):
        super().__init__(
            unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}

    @property
    def __snake_case ( self : List[Any] ):
        # Size of the character vocabulary.
        return len(self.vocab )

    def __snake_case ( self : Optional[int] ):
        # Full token -> id mapping, including added tokens.
        return dict(self.vocab , **self.added_tokens_encoder )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : str ):
        # Tokenize by splitting the text into individual characters.
        lowerCAmelCase__ = []
        for s in text:
            char_tokens.extend(SCREAMING_SNAKE_CASE_ )
        return char_tokens

    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : str ):
        # Character -> id, falling back to the unknown token's id.
        return self.vocab.get(SCREAMING_SNAKE_CASE_ , self.vocab.get(self.unk_token ) )

    def __snake_case ( self : int , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Id -> character via the reverse vocabulary.
        return self.decoder.get(SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
        # Write the vocabulary JSON into `save_directory` and return its path.
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(SCREAMING_SNAKE_CASE_ ) )
            return
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.vocab , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        return (vocab_file,)
# ----------------------------------------------------------------------
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase_ ( snake_case__ ):
    """
    Unit tests for ``DDIMParallelScheduler``: config sweeps, per-step variance
    values, batched no-noise stepping, and full-loop numeric regressions.

    NOTE(review): identifiers appear machine-mangled — the base class
    ``snake_case__`` and the ``SCREAMING_SNAKE_CASE_`` placeholders in the
    bodies do not resolve as written; confirm against the upstream source.
    """

    # Scheduler classes under test and the forward-call default kwargs.
    __lowerCAmelCase : Optional[Any] = (DDIMParallelScheduler,)
    __lowerCAmelCase : List[str] = (('eta', 0.0), ('num_inference_steps', 50))

    def __UpperCAmelCase ( self ,**__snake_case ):
        """Return a default scheduler config, overridden by keyword arguments."""
        A_ = {
            '''num_train_timesteps''': 1_0_0_0,
            '''beta_start''': 0.0001,
            '''beta_end''': 0.02,
            '''beta_schedule''': '''linear''',
            '''clip_sample''': True,
        }
        config.update(**SCREAMING_SNAKE_CASE_ )
        return config

    def __UpperCAmelCase ( self ,**__snake_case ):
        """Run a full denoising loop with a dummy model; return the final sample."""
        A_ = self.scheduler_classes[0]
        A_ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE_ )
        A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        A_ , A_ = 1_0, 0.0
        A_ = self.dummy_model()
        A_ = self.dummy_sample_deter
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
        for t in scheduler.timesteps:
            A_ = model(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ )
            A_ = scheduler.step(SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ).prev_sample
        return sample

    def __UpperCAmelCase ( self ):
        """Config sweep over num_train_timesteps."""
        for timesteps in [1_0_0, 5_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over steps_offset, plus a concrete timestep check."""
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=SCREAMING_SNAKE_CASE_ )
        A_ = self.scheduler_classes[0]
        A_ = self.get_scheduler_config(steps_offset=1 )
        A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        scheduler.set_timesteps(5 )
        assert torch.equal(scheduler.timesteps ,torch.LongTensor([8_0_1, 6_0_1, 4_0_1, 2_0_1, 1] ) )

    def __UpperCAmelCase ( self ):
        """Config sweep over (beta_start, beta_end) pairs."""
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
            self.check_over_configs(beta_start=SCREAMING_SNAKE_CASE_ ,beta_end=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over the supported beta schedules."""
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over the supported prediction types."""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over clip_sample on/off."""
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over the supported timestep spacings."""
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over rescale_betas_zero_snr on/off."""
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Config sweep over thresholding and its related knobs."""
        self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE_ )
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=SCREAMING_SNAKE_CASE_ ,prediction_type=SCREAMING_SNAKE_CASE_ ,sample_max_value=SCREAMING_SNAKE_CASE_ ,)

    def __UpperCAmelCase ( self ):
        """Forward-call sweep over individual timesteps."""
        for t in [1, 1_0, 4_9]:
            self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Forward-call sweep over (timestep, num_inference_steps) pairs."""
        for t, num_inference_steps in zip([1, 1_0, 5_0] ,[1_0, 5_0, 5_0_0] ):
            self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ ,num_inference_steps=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Forward-call sweep over (timestep, eta) pairs."""
        for t, eta in zip([1, 1_0, 4_9] ,[0.0, 0.5, 1.0] ):
            self.check_over_forward(time_step=SCREAMING_SNAKE_CASE_ ,eta=SCREAMING_SNAKE_CASE_ )

    def __UpperCAmelCase ( self ):
        """Check _get_variance against fixed reference values."""
        A_ = self.scheduler_classes[0]
        A_ = self.get_scheduler_config()
        A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_2_0 ,4_0_0 ) - 0.1_4771 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_8_0 ,9_6_0 ) - 0.3_2460 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 ,0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ,4_8_6 ) - 0.0_0979 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ,9_9_8 ) - 0.02 ) ) < 1E-5

    def __UpperCAmelCase ( self ):
        """Check batch_step_no_noise on a stacked batch against reference stats."""
        A_ = self.scheduler_classes[0]
        A_ = self.get_scheduler_config()
        A_ = scheduler_class(**SCREAMING_SNAKE_CASE_ )
        A_ , A_ = 1_0, 0.0
        scheduler.set_timesteps(SCREAMING_SNAKE_CASE_ )
        A_ = self.dummy_model()
        A_ = self.dummy_sample_deter
        A_ = self.dummy_sample_deter + 0.1
        A_ = self.dummy_sample_deter - 0.1
        A_ = samplea.shape[0]
        A_ = torch.stack([samplea, samplea, samplea] ,dim=0 )
        A_ = torch.arange(SCREAMING_SNAKE_CASE_ )[0:3, None].repeat(1 ,SCREAMING_SNAKE_CASE_ )
        A_ = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
        A_ = scheduler.batch_step_no_noise(SCREAMING_SNAKE_CASE_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) ,SCREAMING_SNAKE_CASE_ )
        A_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 1_1_4_7.7_9_0_4 ) < 1E-2
        assert abs(result_mean.item() - 0.4982 ) < 1E-3

    def __UpperCAmelCase ( self ):
        """Full-loop numeric regression with the default (epsilon) config."""
        A_ = self.full_loop()
        A_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 172.0067 ) < 1E-2
        assert abs(result_mean.item() - 0.22_3967 ) < 1E-3

    def __UpperCAmelCase ( self ):
        """Full-loop numeric regression with v_prediction."""
        A_ = self.full_loop(prediction_type='''v_prediction''' )
        A_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 52.5302 ) < 1E-2
        assert abs(result_mean.item() - 0.0684 ) < 1E-3

    def __UpperCAmelCase ( self ):
        """Full-loop regression with set_alpha_to_one and beta_start=0.01."""
        A_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ ,beta_start=0.01 )
        A_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 149.8295 ) < 1E-2
        assert abs(result_mean.item() - 0.1951 ) < 1E-3

    def __UpperCAmelCase ( self ):
        """Full-loop regression for the complementary set_alpha_to_one case."""
        A_ = self.full_loop(set_alpha_to_one=SCREAMING_SNAKE_CASE_ ,beta_start=0.01 )
        A_ = torch.sum(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        A_ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE_ ) )
        assert abs(result_sum.item() - 149.0784 ) < 1E-2
        assert abs(result_mean.item() - 0.1941 ) < 1E-3
# ----------------------------------------------------------------------
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazily-resolved import structure for the DistilBERT package.
# Bug fix: the structure dict and each optional-backend list were assigned to
# throwaway mangled names, and `_import_structure` was referenced at the
# bottom without ever being defined; the `_LazyModule` was also never
# installed into `sys.modules`.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

# Each optional backend extends the structure only when it is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers; mirror the structure above.
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# ----------------------------------------------------------------------
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def __UpperCamelCase (lowerCAmelCase : dict ) -> tuple:
return (data["data"], data["target"])
def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    """Fit an XGBoost regressor and predict targets for the test features.

    :param features: training feature matrix
    :param target: training target values
    :param test_features: feature matrix to predict on
    :return: column vector (len(test_features), 1) of predictions
    """
    # fixed random_state keeps the model deterministic across runs
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions
def main() -> None:
    """Fetch the California-housing dataset, train XGBoost, and print errors.

    Downloads the dataset (network access required), holds out 25% of the
    rows for evaluation, then reports MAE and MSE of the predictions.
    """
    data = fetch_california_housing()
    data_input, data_output = data_handling(data)
    x_train, x_test, y_train, y_test = train_test_split(
        data_input, data_output, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
    # Run any doctests in this module, then train and evaluate the model.
    import doctest

    doctest.testmod(verbose=True)
    main()
| 699 |
from collections import deque
class Process:
    """A schedulable process with the bookkeeping fields used by MLFQ."""

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of a finished process, or the last time it was interrupted
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in ready queues
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler.

    Processes flow through ``number_of_queues`` queues: every queue except
    the last is scheduled round-robin with its own time slice, and the last
    queue is scheduled first-come-first-served.
    """

    def __init__(
        self,
        number_of_queues: int,
        time_slices: "list[int]",
        queue: "deque[Process]",
        current_time: int,
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slices of the queues that use the round-robin algorithm
        self.time_slices = time_slices
        # unfinished processes wait in this ready queue
        self.ready_queue = queue
        # current (simulated) time
        self.current_time = current_time
        # finished processes land here, in completion order
        self.finish_queue: deque = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Return the names of the finished processes, in completion order."""
        return [process.process_name for process in self.finish_queue]

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Return the total waiting time of every process in `queue`."""
        return [process.waiting_time for process in queue]

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Return the turnaround time of every process in `queue`."""
        return [process.turnaround_time for process in queue]

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Return the completion (stop) time of every process in `queue`."""
        return [process.stop_time for process in queue]

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        """Return the remaining burst time of every process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        """Add the time `process` just spent waiting (since its last stop)."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every queued process to completion, FCFS, and return them."""
        finished: deque = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process arrives in the future, fast-forward to its arrival
            # (was `+=` in the original, which overshoots past the arrival time)
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of the current process
            self.update_waiting_time(cp)
            # run the process to completion
            self.current_time += cp.burst_time
            cp.burst_time = 0
            cp.turnaround_time = self.current_time - cp.arrival_time
            cp.stop_time = self.current_time
            finished.append(cp)
        self.finish_queue.extend(finished)  # record completions in order
        # FCFS finishes all remaining processes
        return finished

    def round_robin(self, ready_queue: "deque[Process]", time_slice: int):
        """Give each queued process at most one `time_slice` of CPU.

        Returns (finished processes, processes that still need CPU time).
        """
        finished: deque = deque()  # processes completed during this cycle
        # exactly one cycle: unfinished processes go to the back of the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # fast-forward to the arrival time if the CPU would be idle
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            self.update_waiting_time(cp)
            if cp.burst_time > time_slice:
                # use the CPU for one time slice only
                self.current_time += time_slice
                cp.burst_time -= time_slice
                cp.stop_time = self.current_time
                # not finished: requeue at the back
                ready_queue.append(cp)
            else:
                # the process finishes within this slice
                self.current_time += cp.burst_time
                cp.burst_time = 0
                cp.stop_time = self.current_time
                cp.turnaround_time = self.current_time - cp.arrival_time
                finished.append(cp)
        self.finish_queue.extend(finished)  # record completions in order
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run the full MLFQ schedule and return the finish queue."""
        # all queues except the last one use the round-robin algorithm
        for i in range(self.number_of_queues - 1):
            _, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue uses the first-come-first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # one time slice is required per round-robin queue (all but the last)
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # fresh processes for the demonstration run (doctests may have mutated the first set)
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{mlfq.calculate_waiting_time([P1, P2, P3, P4])}")
    # print completion times of processes(P1, P2, P3, P4)
    print(f"completion time:\t\t{mlfq.calculate_completion_time([P1, P2, P3, P4])}")
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(f"turnaround time:\t\t{mlfq.calculate_turnaround_time([P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
| 668 | 0 |
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Return the set of distinct prime factors of `n` by trial division.

    :param n: integer >= 1 (1 yields the empty set)
    :return: set of primes dividing n
    """
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            # i divides n: record it and strip this factor out
            n //= i
            factors.add(i)
    # whatever remains above 1 is itself prime
    if n > 1:
        factors.add(n)
    return factors
@lru_cache
def upf_len(num: int) -> int:
    """Return the number of distinct prime factors of `num` (memoized).

    NOTE: the cache is unbounded; fine for this bounded search, but it
    would grow without limit on an open-ended key space.
    """
    return len(unique_prime_factors(num))
def equality(iterable: list) -> bool:
    """Return True when all elements are equal (or the iterable is empty)."""
    return len(set(iterable)) in (0, 1)
def run(n: int) -> list:
    """Find the first run of `n` consecutive integers that each have
    exactly `n` distinct prime factors.

    :param n: length of the run and required distinct-prime-factor count
    :return: the n consecutive integers, starting from the smallest base
    """
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function.
        # Append our target number to the end so `equality` also enforces
        # that every count equals n, not merely that the counts agree.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1
def solution(n: int = 4):
    """Return the first of `n` consecutive integers with `n` distinct
    prime factors each (Project Euler style), or None if the search
    yielded nothing.
    """
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
    # Print the first integer of the run found by `solution` (default n=4).
    print(solution())
| 682 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): module-level flag whose original name was mangled; judging by
# the "true" value it likely configured tokenizer parallelism (e.g. the
# TOKENIZERS_PARALLELISM environment setting) — confirm against the original script.
_UpperCAmelCase : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return a regression model, its accelerator-prepared copy, and a
    prepared dataloader over a synthetic regression dataset.

    :param accelerator: the Accelerator driving device placement
    :param num_samples: number of rows in the synthetic dataset
    :param batch_size: dataloader batch size
    """
    set_seed(42)  # deterministic weights and data
    model = RegressionModel()
    ddp_model = deepcopy(model)  # untouched baseline vs. prepared copy
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build a dataloader over the tokenized GLUE/MRPC validation split.

    :param accelerator: used to serialize dataset preprocessing across processes
    :param use_longest: pad to the longest sample in the batch instead of max_length=128
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        # tokenize sentence pairs; padding is deferred to the collator
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # main process maps first so the cached result is reused by the others
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build baseline ("no") and distributed ("ddp") MRPC evaluation setups.

    Returns ({"ddp": [...], "no": [...]}, accelerator) where each entry holds
    [model, dataloader, device].
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader` and gather all logits and targets.

    Each batch is expected to yield (inputs, targets) via `.values()`.
    Results are gathered with `accelerator.gather_for_metrics` so duplicated
    samples from distributed padding are dropped, then concatenated.
    """
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    """Check gather_for_metrics returns exactly `num_samples` predictions
    (i.e. distributed padding duplicates are dropped)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, targs = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Verify distributed metric computation matches the single-process baseline."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        # drop padded duplicates before scoring
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Entry point: run the gather_for_metrics test matrix across
    split_batches/dispatch_batches combinations."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    # keep log noise to the main process only
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Per-process entry point used by TPU spawn launchers; `index` is the
    process index (unused)."""
    main()
if __name__ == "__main__":
    # Entry point when launched directly (e.g. via `accelerate launch`).
    main()
| 668 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
# Force deterministic torch/cuDNN behavior so pipeline outputs are reproducible.
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    """Fast tests for the AltDiffusion image-to-image pipeline.

    NOTE(review): method and property names were reconstructed from the
    in-class references (self.dummy_*) after name mangling; test method
    names follow the upstream diffusers conventions — confirm against the
    original test module.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        """A small random image tensor of shape (1, 3, 32, 32), fixed seed."""
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        """A tiny conditional UNet with deterministic weights."""
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        """A tiny VAE with deterministic weights."""
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny Roberta-series text encoder with deterministic weights."""
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        """A stub feature extractor returning an empty pixel-value tensor."""

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU integration tests for the AltDiffusion img2img pipeline.

    NOTE(review): class and method names reconstructed after name mangling;
    confirm against the original diffusers test module.
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 493 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : str = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
_UpperCAmelCase : str = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
_UpperCAmelCase : List[str] = {
"ctrl": 256,
}
_UpperCAmelCase : int = {
"Pregnancy": 168_629,
"Christianity": 7_675,
"Explain": 106_423,
"Fitness": 63_440,
"Saving": 63_163,
"Ask": 27_171,
"Ass": 95_985,
"Joke": 163_509,
"Questions": 45_622,
"Thoughts": 49_605,
"Retail": 52_342,
"Feminism": 164_338,
"Writing": 11_992,
"Atheism": 192_263,
"Netflix": 48_616,
"Computing": 39_639,
"Opinion": 43_213,
"Alone": 44_967,
"Funny": 58_917,
"Gaming": 40_358,
"Human": 4_088,
"India": 1_331,
"Joker": 77_138,
"Diet": 36_206,
"Legal": 11_859,
"Norman": 4_939,
"Tip": 72_689,
"Weight": 52_343,
"Movies": 46_273,
"Running": 23_425,
"Science": 2_090,
"Horror": 37_793,
"Confession": 60_572,
"Finance": 12_250,
"Politics": 16_360,
"Scary": 191_985,
"Support": 12_654,
"Technologies": 32_516,
"Teenage": 66_160,
"Event": 32_769,
"Learned": 67_460,
"Notion": 182_770,
"Wikipedia": 37_583,
"Books": 6_665,
"Extract": 76_050,
"Confessions": 102_701,
"Conspiracy": 75_932,
"Links": 63_674,
"Narcissus": 150_425,
"Relationship": 54_766,
"Relationships": 134_796,
"Reviews": 41_671,
"News": 4_256,
"Translation": 26_820,
"multilingual": 128_406,
}
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = set()
lowerCAmelCase__ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase__ = char
lowerCAmelCase__ = set(lowercase__ )
return pairs
class lowerCAmelCase_ ( snake_case__ ):
UpperCamelCase_ :int = VOCAB_FILES_NAMES
UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ :Optional[int] = CONTROL_CODES
def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , **SCREAMING_SNAKE_CASE_ : Tuple ):
super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowerCAmelCase__ = {}
@property
def __snake_case ( self : List[str] ):
return len(self.encoder )
def __snake_case ( self : Union[str, Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
if token in self.cache:
return self.cache[token]
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase__ , lowerCAmelCase__ = bigram
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase__ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ = word[:-4]
lowerCAmelCase__ = word
return word
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
lowerCAmelCase__ = []
lowerCAmelCase__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
for token in words:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowerCAmelCase__ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
lowerCAmelCase__ = 0
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowerCAmelCase__ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 668 | 0 |
class Graph:
    """Directed graph stored as an adjacency list, with a recursive DFS."""

    def __init__(self) -> None:
        # adjacency list: vertex -> list of direct successors
        self.vertex = {}

    def print_graph(self) -> None:
        """Print the raw adjacency dict, then one `u  ->  v -> w` line per vertex."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        """Add a directed edge from `from_vertex` to `to_vertex`."""
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else create a new vertex with this single successor
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        """Visit every vertex (covering disconnected components) in DFS order."""
        # visited array for storing already visited nodes;
        # assumes vertices are labeled 0..n-1, as in the demo below
        visited = [False] * len(self.vertex)
        # call the recursive helper for each unvisited vertex
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        """Mark and print `start_vertex`, then recurse into unvisited vertices.

        NOTE(review): this iterates over *all* vertices rather than only the
        successors of `start_vertex`, preserving the original behavior (the
        printed order still matches the documented output for the demo graph).
        """
        # mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    # Build the demo graph and run DFS; the original assigned the instance to an
    # obfuscated name while every call used `g`, so `g` was undefined.
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)
    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0  ->  1 -> 2
    # 1  ->  2
    # 2  ->  0 -> 3
    # 3  ->  3
    # DFS:
    #  0 1 2 3
| 17 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Abstract interface for generation streamers.

    The original defined both methods under one name (the second shadowed the
    first); restored to the ``put``/``end`` streamer API that subclasses below
    override.
    """

    def put(self, value):
        """Receive newly generated token ids; must be implemented by subclasses."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation finished; must be implemented by subclasses."""
        raise NotImplementedError()
class TextStreamer(BaseStreamer):
    """Streamer that decodes tokens incrementally and prints text to stdout.

    Reconstruction notes: every method was named ``__snake_case`` so the
    internal calls ``self._is_chinese_char`` / ``self.on_finalized_text``
    (visible in the original body) were unresolvable; the undefined base
    ``snake_case__`` is the BaseStreamer defined above.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs
        # variables used in the streaming process
        self.token_cache = []   # undecoded token ids accumulated so far
        self.print_len = 0      # number of chars of the decoded cache already emitted
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Accept a 1-element batch of new token ids, decode and emit printable text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape) > 1:
            value = value[0]
        if self.skip_prompt and self.next_tokens_are_prompt:
            # First call carries the prompt tokens; drop them when requested.
            self.next_tokens_are_prompt = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(printable_text)
        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and mark the stream as finished."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''
        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print finalized text; newline only at stream end."""
        print(text, flush=True, end='''''' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if ``cp`` (a Unicode code point) is in a CJK ideograph block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4E00 and cp <= 0x9FFF)
            or (cp >= 0x3400 and cp <= 0x4DBF)
            or (cp >= 0x2_0000 and cp <= 0x2_A6DF)
            or (cp >= 0x2_A700 and cp <= 0x2_B73F)
            or (cp >= 0x2_B740 and cp <= 0x2_B81F)
            or (cp >= 0x2_B820 and cp <= 0x2_CEAF)
            or (cp >= 0xF900 and cp <= 0xFAFF)
            or (cp >= 0x2_F800 and cp <= 0x2_FA1F)
        ):
            return True
        return False
class TextIteratorStreamer(TextStreamer):
    """TextStreamer variant that exposes the stream as a blocking iterator.

    Finalized text chunks are pushed onto a queue; ``__next__`` pops them until
    the ``stop_signal`` sentinel arrives. The obfuscated version named both
    overrides ``__snake_case``, so ``__next__`` never existed and iteration
    was broken.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None   # sentinel marking end-of-stream
        self.timeout = timeout    # max seconds to block on queue put/get

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Enqueue finalized text; enqueue the sentinel when the stream ends."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases and the unit conversion constant. The originals were all
# assigned to one obfuscated name, leaving `PICO_TO_ANGSTROM` (used in
# from_proteinnet_string) and `FeatureDict`/`ModelOutput` (used in
# from_prediction's annotations) undefined.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    Field names and order restored from the keyword arguments used by
    ``Protein(...)`` constructor calls and ``prot.<attr>`` accesses elsewhere
    in this module; the obfuscated version named every field identically and
    replaced the annotations with ``42``.
    """

    # Cartesian coordinates of atoms in angstroms: [num_res, num_atom_type, 3]
    atom_positions: np.ndarray

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> "Protein":
    """Parse a ProteinNet-format string into a Protein.

    Reconstructed: the obfuscation collapsed every local onto one name. The
    per-atom slice assignment targets use ``residue_constants.atom_order`` —
    NOTE(review): that attribute name is the conventional one; it was erased
    by the obfuscation, so confirm it exists on this package's
    ``residue_constants`` module.
    """
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each [TAG] with the lines of its section.
    groups = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = "X"  # FIXME: strings are immutable
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            # ProteinNet stores picometers; convert to angstroms.
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def get_pdb_headers(prot: "Protein", chain_id: int = 0) -> List[str]:
    """Return PDB header lines (REMARK / PARENT) for one chain of ``prot``.

    Name restored from the ``get_pdb_headers(...)`` calls inside ``to_pdb``;
    locals were collapsed onto one obfuscated name.
    """
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f'REMARK {remark}')

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        # Keep only the parents that belong to the requested chain.
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ['N/A']

    pdb_headers.append(f'PARENT {" ".join(parents)}')

    return pdb_headers
def add_pdb_headers(prot: "Protein", pdb_str: str) -> str:
    """Add REMARK/PARENT headers to an existing PDB string, per chain.

    Existing PARENT/REMARK lines in ``pdb_str`` are dropped and regenerated;
    a fresh PARENT line is inserted after each TER (chain break).
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split('\n')

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}')

    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index (keys are stringified ints).
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ['N/A'])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [['N/A']]

    def make_parent_line(p: Sequence[str]) -> str:
        return f'PARENT {" ".join(p)}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A chain just ended; emit the next chain's PARENT header.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']
            out_pdb_lines.append(make_parent_line(chain_parents))

    return '\n'.join(out_pdb_lines)
def to_pdb(prot: "Protein") -> str:
    """Convert a Protein instance to a PDB-format string.

    Reconstructed: the obfuscation collapsed all locals onto one name; local
    names here are taken from the f-strings that survived (``record_type``,
    ``res_name_a``, ``chain_tag``, ...) and the helper name ``res_atoa`` from
    its call site in the TER line.
    """
    restypes = residue_constants.restypes + ['X']

    def res_atoa(r: int) -> str:
        # 1-letter -> 3-letter residue name, 'UNK' for the unknown type.
        return residue_constants.restype_atoa.get(restypes[r], 'UNK')

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_a = res_atoa(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''

            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_a:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1}   '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f}          '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f'{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append('END')
    pdb_lines.append('')
    return '\n'.join(pdb_lines)
def ideal_atom_mask(prot: "Protein") -> np.ndarray:
    """Return the standard atom mask for each residue type of ``prot``."""
    # Fixed param/body name mismatch (body used a name that was never bound).
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """Assemble a Protein from model input features and prediction output.

    ``residue_index`` is shifted to be 1-based (PDB convention); missing
    b-factors default to zeros shaped like the atom mask.
    """
    return Protein(
        aatype=features['aatype'],
        atom_positions=result['final_atom_positions'],
        atom_mask=result['final_atom_mask'],
        residue_index=features['residue_index'] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 72 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Standard Hugging Face lazy-module bootstrap. The obfuscated version assigned
# the structure dict, the modeling list, and the _LazyModule instance to
# unrelated throwaway names, so `_import_structure` was undefined at the
# _LazyModule call and the lazy module was never installed in sys.modules.
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only importable when torch is present.
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 0 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    """Exercise send_file with socket and open fully mocked.

    Mock args are injected bottom-up: @patch('builtins.open') -> `file`,
    @patch('socket.socket') -> `sock` (both names are used by the assertions
    below; the obfuscated version declared duplicate parameter names, a
    SyntaxError, and never bound `conn`).
    """
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one data chunk, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
| 190 |
from __future__ import annotations
def lowerCAmelCase_(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken whole in decreasing value/weight ratio until one no longer
    fits; that item is taken fractionally. Returns ``(max_value, fractions)``
    where ``fractions[i]`` is the fraction of item ``i`` taken.

    The original declared three parameters with the same name (SyntaxError)
    and the sort lambda referenced an unbound variable.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Greedy order: best value-per-weight first.
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value = 0
    fractions = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Partial take of the first item that does not fit, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest

    doctest.testmod()
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class __UpperCAmelCase(SequenceFeatureExtractor):
    """Speech feature extractor producing log mel-frequency spectral (MFSC) features.

    Reconstruction notes: the original `__init__`/`__call__` declared many
    parameters under one duplicated name (a SyntaxError); parameter names are
    restored from the attribute assignments and keyword usages in the bodies.
    The undefined base was the `SequenceFeatureExtractor` imported above; the
    private method names are restored from their call sites
    (`self._extract_mfsc_features`, `self._normalize_one`, `self.normalize`).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        padding_value=0.0,
        hop_length=10,
        win_length=25,
        win_function="hamming_window",
        frame_signal_scale=32_768.0,
        preemphasis_coeff=0.97,
        mel_floor=1.0,
        normalize_means=True,
        normalize_vars=True,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.hop_length = hop_length
        self.win_length = win_length
        self.frame_signal_scale = frame_signal_scale
        self.preemphasis_coeff = preemphasis_coeff
        self.mel_floor = mel_floor
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.win_function = win_function
        self.return_attention_mask = return_attention_mask

        # Window/stride lengths are given in milliseconds; convert to samples.
        self.sample_size = win_length * sampling_rate // 1_000
        self.sample_stride = hop_length * sampling_rate // 1_000

        self.n_fft = optimal_fft_length(self.sample_size)
        self.n_freqs = (self.n_fft // 2) + 1

    def _extract_mfsc_features(self, one_waveform: np.array):
        """Compute log-mel spectral features for one mono waveform; returns (frames, feature_size)."""
        if self.win_function == "hamming_window":
            window = window_function(window_length=self.sample_size, name=self.win_function, periodic=False)
        else:
            window = window_function(window_length=self.sample_size, name=self.win_function)

        fbanks = mel_filter_bank(
            num_frequency_bins=self.n_freqs,
            num_mel_filters=self.feature_size,
            min_frequency=0.0,
            max_frequency=self.sampling_rate / 2.0,
            sampling_rate=self.sampling_rate,
        )

        msfc_features = spectrogram(
            one_waveform * self.frame_signal_scale,
            window=window,
            frame_length=self.sample_size,
            hop_length=self.sample_stride,
            fft_length=self.n_fft,
            center=False,
            preemphasis=self.preemphasis_coeff,
            mel_filters=fbanks,
            mel_floor=self.mel_floor,
            log_mel='log',
        )
        return msfc_features.T

    def _normalize_one(self, x, input_length, padding_value):
        """Mean/variance-normalize one feature array over its valid length."""
        # make sure we normalize float32 arrays
        if self.normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if self.normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)
        return x

    def normalize(self, input_features, attention_mask=None):
        """Normalize a batch of feature arrays, using the mask to find valid lengths."""
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [self._normalize_one(x, n, self.padding_value) for x, n in zip(input_features, lengths)]

    def __call__(
        self,
        raw_speech,
        padding=False,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
        sampling_rate=None,
        **kwargs,
    ):
        """Featurize raw mono speech (single array or batch) into padded model inputs."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the ``sampling_rate`` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_mfsc_features(one_waveform) for one_waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({'input_features': features})
        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get('input_features')
        if isinstance(input_features[0], list):
            padded_inputs['input_features'] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get('attention_mask')
        if attention_mask is not None:
            padded_inputs['attention_mask'] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        if self.normalize_means or self.normalize_vars:
            # Only use the mask for length lookup when real padding was requested.
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                and padding
                else None
            )
            padded_inputs['input_features'] = self.normalize(
                padded_inputs['input_features'], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
| 366 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertions for a 4-row / 3-column parquet-backed Dataset."""
    # The original declared two parameters with the same name (SyntaxError).
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """ParquetDatasetReader honors keep_in_memory (fixture names restored from the body)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """Explicit feature schemas are applied; None falls back to the defaults."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The requested split is carried through; None defaults to 'train'."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # Parenthesized: the original `a == b if c else d` asserted a bare truthy
    # string when split was None, so it never actually checked the default.
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """A single path or a list of paths both load the same dataset."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for a DatasetDict whose splits are 4x3 parquet datasets."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Dict-of-paths input yields a DatasetDict and honors keep_in_memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """Explicit feature schemas are applied to every split of the DatasetDict."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Split names from the input dict are preserved on the resulting datasets."""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to parquet round-trips the underlying Arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features survive a parquet round trip, both eager and streaming."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Row-group size defaults depend on whether media features are present."""
    assert get_writer_batch_size(feature) == expected
| 668 | 0 |
"""simple docstring"""
import copy
import re
class __UpperCamelCase :
lowerCamelCase : int ='hp'
lowerCamelCase : Dict ={}
lowerCamelCase : List[Any] =None
@classmethod
def __a ( cls , lowerCAmelCase__ , lowerCAmelCase__ ) -> Union[str, Any]:
a : Tuple = prefix
a : Any = defaults
cls.build_naming_info()
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return ""
a : Union[str, Any] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(SCREAMING_SNAKE_CASE_ ) + 1 ):
a : Tuple = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
a : int = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCAmelCase__ ):
a : Optional[int] = ""
while integer != 0:
a : Dict = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
a : Tuple = 0
while True:
a : int = word + "#" + int_to_alphabetic(SCREAMING_SNAKE_CASE_ )
if sword in info["reverse_short_word"]:
continue
else:
a : Optional[Any] = sword
break
a : Optional[int] = short_word
a : int = word
return short_word
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
a : List[str] = param_name.split("_" )
a : int = [TrialShortNamer.shortname_for_word(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
a : str = ["", "_"]
for separator in separators:
a : Union[str, Any] = separator.join(SCREAMING_SNAKE_CASE_ )
if shortname not in info["reverse_short_param"]:
a : str = shortname
a : Union[str, Any] = param_name
return shortname
return param_name
@staticmethod
def __a ( lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[Any]:
a : str = TrialShortNamer.shortname_for_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a : Optional[int] = short_name
a : Optional[int] = param_name
@classmethod
def __a ( cls ) -> Any:
if cls.NAMING_INFO is not None:
return
a : Union[str, Any] = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
a : List[Any] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
a : int = info
@classmethod
def __a ( cls , lowerCAmelCase__ ) -> str:
cls.build_naming_info()
assert cls.PREFIX is not None
a : List[Any] = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
a : Union[str, Any] = cls.NAMING_INFO["short_param"][k]
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
a : List[Any] = 1 if v else 0
a : Optional[Any] = "" if isinstance(SCREAMING_SNAKE_CASE_ , (int, float) ) else "-"
a : str = f"""{key}{sep}{v}"""
name.append(SCREAMING_SNAKE_CASE_ )
return "_".join(SCREAMING_SNAKE_CASE_ )
@classmethod
def __a ( cls , lowerCAmelCase__ ) -> List[Any]:
a : int = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
a : str = []
else:
a : Any = repr.split("_" )
a : Any = {}
for value in values:
if "-" in value:
a, a : Tuple = value.split("-" )
else:
a : str = re.sub("[0-9.]" , "" , SCREAMING_SNAKE_CASE_ )
a : Union[str, Any] = float(re.sub("[^0-9.]" , "" , SCREAMING_SNAKE_CASE_ ) )
a : Union[str, Any] = cls.NAMING_INFO["reverse_short_param"][p_k]
a : Any = p_v
for k in cls.DEFAULTS:
if k not in parameters:
a : Union[str, Any] = cls.DEFAULTS[k]
return parameters
| 633 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# File name expected inside a pretrained CamemBERT checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

# Hub locations of the pretrained sentencepiece models.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

# Maximum input length per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = "▁"
class lowerCAmelCase_(PreTrainedTokenizer):
    """CamemBERT tokenizer backed by a SentencePiece BPE model.

    The fairseq-style special tokens (``<s>NOTUSED``, ``<pad>``, ``</s>NOTUSED``,
    ``<unk>``) are prepended to the sentencepiece vocabulary, so every
    sentencepiece id is shifted by ``fairseq_offset``.

    Fixes: the original declared every parameter of every method under one
    duplicated name (a SyntaxError) and never stored state on ``self``; the base
    class and class attributes are restored to the names ``PreTrainedTokenizer``
    expects.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> (and </s></s> between a pair) around the token ids."""
        if token_ids_a_pair is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_pair + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_pair, already_has_special_tokens=True
            )
        if token_ids_a_pair is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_pair)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_pair: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token type ids: the mask is all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep) * [0]

    @property
    def vocab_size(self):
        # fairseq specials + the sentencepiece pieces.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Map a token string to its id, honoring the fairseq special tokens."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, honoring the fairseq special tokens."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join tokens into a single string, decoding sub-token runs with sentencepiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePieceProcessor is not picklable; it is rebuilt in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the sentencepiece model into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 668 | 0 |
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    """Builds a tiny ALBERT config and dummy inputs for the Flax model tests.

    Fixes: constructor arguments were never stored on ``self`` (they were bound
    to one collapsed local), and the class is renamed to the name the test class
    below actually references (`FlaxAlbertModelTester`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return a small AlbertConfig plus random ids and (optional) masks."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage prepare_config_and_inputs() output for the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Runs the common Flax model checks over every ALBERT head.

    Fixes: `FlaxAlbertForQuestionAnswering` appeared twice in the tuple; the
    mixin attribute must be named ``all_model_classes``; ``setUp`` must create
    ``self.model_tester``; the slow check needs a ``test_`` name to be collected.
    """

    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke test: each head loads from the hub and runs a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    """Checks albert-base-v2 hidden states against reference values.

    Fixes: every local was bound to one collapsed name (so the model handle was
    overwritten before use) and the inputs were referenced through an undefined
    name; the method also needs a ``test_`` prefix to be collected.
    """

    @slow
    def test_model_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the last hidden state for this exact input.
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 9 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger used throughout main(); restored from the collapsed name.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval.

    Fixes: every field was declared under one duplicated attribute name (so only
    one field survived) and the validation hook was not named ``__post_init__``;
    the field names are restored from the references in ``main`` (e.g.
    ``data_args.dataset_name``, ``data_args.train_file``).
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name or both local train/validation files must be given.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Fixes: all fields shared one duplicated attribute name; names are restored
    from the references in ``main`` (``model_args.model_name_or_path`` etc.).
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    """Fine-tune / evaluate / predict with BART+TAPEX on table fact verification (TabFact).

    Fixes: renamed to ``main`` (the ``__main__`` guard and the spawn entry point
    both call ``main()``); every local was bound to one collapsed name while
    later statements referenced the real names (``parser``, ``raw_datasets``,
    ``padding``, ...); ``fpaa``/``floataa`` restored to ``fp16``/``float64``.
    """
    # See all possible arguments by passing --help, or via the HfArgumentParser docs.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: either a hub dataset (e.g. tab_fact) or local CSV/JSON files.
    # In distributed training, load_dataset guarantees only one local process downloads.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files. CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer.
    # In distributed training, .from_pretrained guarantees only one local process downloads.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            """Turn a '#'-delimited table-text blob into a pandas DataFrame (first row = header)."""
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        # Pad to a multiple of 8 so fp16 tensor cores are used efficiently.
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    """Entry point for xla_spawn (TPU multiprocessing); the spawn index is unused.

    Renamed so it no longer shadows the main driver function defined above.
    """
    main()
if __name__ == "__main__":
main()
| 668 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Hub locations of pretrained Transfo-XL config files.
# Fixes: both module constants were bound to the same name, so the second
# assignment silently shadowed the logger.
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class lowerCamelCase (PretrainedConfig ):
    """Configuration class for a Transformer-XL (``transfo-xl``) model.

    NOTE(review): the original `__init__` declared every parameter as
    ``_snake_case`` (duplicate argument names are a SyntaxError), assigned each
    value to a throwaway local instead of ``self``, inherited from the
    undefined name ``snake_case__``, and paired a property named
    ``lowerCAmelCase_`` with ``@max_position_embeddings.setter``. The
    canonical parameter/attribute names are restored here; the (mangled)
    class name is kept so existing references continue to work.
    """

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    # Maps generic config attribute names onto Transformer-XL's own names.
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        """Store all hyper-parameters on the instance and forward the rest
        (e.g. ``eos_token_id``) to the base config class."""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all projections except the first one when requested.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"""The model {self.model_type} is one of the few models that has no sequence length limit.""")
| 159 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    If ``digit_amount`` > 0 the fractional part is rounded to that many
    digits; otherwise it is returned unrounded (subject to the usual binary
    floating-point representation error).

    The function was defined under a mangled name while the ``__main__``
    block below called ``decimal_isolate`` — a guaranteed NameError; the
    definition is renamed to match its call sites.

    >>> decimal_isolate(35.345, 1)
    0.3
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 668 | 0 |
import itertools
import math
def is_prime(number: int) -> bool:
    """Return ``True`` iff *number* is prime.

    Uses trial division over candidates of the form 6k ± 1 up to sqrt(number).
    (All three functions here were mangled to the same name ``UpperCAmelCase_``
    so each later ``def`` shadowed the previous one and the internal calls to
    ``is_prime``/``prime_generator`` were NameErrors; proper names restored.)
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def is_prime_generator_helper():  # pragma: no cover - placeholder removed
    raise NotImplementedError


def prime_generator():
    """Yield the primes 2, 3, 5, ... in ascending order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime number (Project Euler problem 7).

    >>> solution(6)
    13
    """
    return next(itertools.islice(prime_generator(), nth - 1, nth))


if __name__ == "__main__":
    print(f'''{solution() = }''')
| 188 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    """Builds tiny Funnel configs/inputs and shape-checks every TF head.

    NOTE(review): the original class and all of its methods were mangled to
    shared names (``lowerCAmelCase_`` / ``__snake_case``), so later ``def``s
    shadowed earlier ones and the test classes below referenced the undefined
    ``TFFunnelModelTester``; canonical names are restored throughout.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching tiny FunnelConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check TFFunnelModel output shape for dict, list and tensor inputs."""
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check TFFunnelBaseModel pooled output length under config variants."""
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        # Tile every input num_choices times along a new axis 1.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """unittest suite for the full (encoder+decoder) TF Funnel heads.

    NOTE(review): both test classes in this file were mangled to the same
    name ``lowerCAmelCase_`` (so the second shadowed the first and unittest
    could only ever discover one of them) and every test method was named
    ``__snake_case``; distinct canonical names are restored.
    """

    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    """unittest suite for the base (encoder-only) TF Funnel heads.

    NOTE(review): renamed from the mangled ``lowerCAmelCase_`` (which
    collided with the other test class) and the ``__snake_case`` test
    methods were given distinct canonical names.
    """

    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # base=True builds the tester around the encoder-only model family.
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 668 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
# Module-level logger. The original bound it to `_UpperCAmelCase`, a name that
# is immediately shadowed by the benchmark class below, while the class methods
# call the then-undefined `logger` — so bind it under the name actually used.
logger = logging.get_logger(__name__)
def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool):
    """Decorator factory: run the wrapped callable eagerly or compiled.

    When ``do_eager_mode`` is True the function is returned essentially
    unchanged (and ``use_xla`` must be False); otherwise it is wrapped in
    ``tf.function(experimental_compile=use_xla)`` so it runs in graph mode,
    optionally XLA-compiled.

    NOTE(review): the original ``def`` used the duplicate parameter name
    ``lowerCAmelCase`` twice (a SyntaxError) and was itself named
    ``__UpperCamelCase`` although the class below applies it as the
    decorator ``run_with_tf_optimizations``; proper names restored.
    """

    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    'Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.' )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]:
    """Build a random int32 token-id tensor of shape (batch_size, sequence_length).

    Values are uniform in [0, vocab_size - 1]. Fixes the mangled original,
    which re-used one parameter name three times (SyntaxError), was shadowed
    by the previous definition, and used the nonexistent dtype ``tf.intaa``
    (now ``tf.int32``).
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class _UpperCAmelCase (Benchmark ):
    """TensorFlow benchmark backend measuring inference/training speed and memory.

    NOTE(review): the base class was the undefined ``snake_case__`` (the file
    imports ``Benchmark``, which is used here), the three class attributes and
    every method were mangled to shared names (so later ``def``s shadowed
    earlier ones and ``self._prepare_inference_func`` etc. were undefined),
    and most locals were assigned to one throwaway name. Canonical member
    names are restored; the (mangled) class name is kept so any existing
    references continue to work — a rename to ``TensorFlowBenchmark`` is
    recommended once call sites are audited.
    """

    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"

    @property
    def framework_version(self):
        """Version string of the TensorFlow runtime."""
        return tf.__version__

    def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time one forward pass of *model_name* at the given input size."""
        # initialize GPU on separate process
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_inference)

    def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float:
        """Time one forward+backward pass of *model_name* at the given input size."""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_speed(_train)

    def _inference_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Measure peak memory of one forward pass."""
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _inference = self._prepare_inference_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_inference)

    def _train_memory(self, model_name: str, batch_size: int, sequence_length: int):
        """Measure peak memory of one forward+backward pass."""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True)
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError('A device strategy has to be initialized before using TensorFlow.' )
        _train = self._prepare_train_func(model_name, batch_size, sequence_length)
        return self._measure_memory(_train)

    def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable doing one forward pass."""
        config = self.config_dict[model_name]

        if self.args.fpaa:
            raise NotImplementedError('Mixed precision is currently not supported.' )

        has_model_class_in_config = (
            hasattr(config, 'architectures' )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids, decoder_input_ids=input_ids, training=False )

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_forward():
            return model(input_ids, training=False )

        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]:
        """Instantiate the model and return a zero-arg callable computing gradients."""
        config = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError('Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.' )

        if self.args.fpaa:
            raise NotImplementedError('Mixed precision is currently not supported.' )

        has_model_class_in_config = (
            hasattr(config, 'architectures' )
            and isinstance(config.architectures, list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = 'TF' + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__('transformers', fromlist=[model_class] )
                model_cls = getattr(transformers_module, model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    ' set `--only_pretrain_model` or `args.only_pretrain_model=True`.' )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )

        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config, 'vocab_size' ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size, sequence_length, vocab_size )

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla )
        def encoder_train():
            loss = model(input_ids, labels=input_ids, training=True )[0]
            gradients = tf.gradients(loss, model.trainable_variables )
            return gradients

        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _measure_speed(self, func) -> float:
        """Return the best per-call runtime of *func* in seconds."""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info('Do inference on TPU. Running model 5 times to stabilize compilation' )
                    timeit.repeat(func, repeat=1, number=5 )

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func, repeat=self.args.repeat, number=10, )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )

    def _measure_memory(self, func: Callable[[], None] ):
        """Return (peak memory, optional line-by-line summary) for *func*."""
        logger.info(
            'Note that TensorFlow allocates more memory than '
            'it might need to speed up computation. '
            'The memory reported here corresponds to the memory '
            'reported by `nvidia-smi`, which can vary depending '
            'on total available memory on the GPU that is used.' )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            '`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory'
                            ' consumption line by line.' )
                    trace = start_memory_tracing('transformers' )

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        'Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking'
                        ' with `args.memory=False`' )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            'py3nvml not installed, we won\'t log GPU memory usage. '
                            'Install py3nvml (pip install py3nvml) to log information about GPU.' )
                        memory = 'N/A'
                    else:
                        logger.info(
                            'Measuring total GPU usage on GPU device. Make sure to not have additional processes'
                            ' running on the same GPU.' )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            'When enabling line by line tracing, the max peak memory for CPU is inaccurate in'
                            ' TensorFlow.' )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes, int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 699 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Type aliases for the nested numpy-feature dictionaries used below.
# NOTE(review): the original assigned all three values to `_UpperCAmelCase`
# (each clobbering the last), so `PICO_TO_ANGSTROM` — used by the TERTIARY
# parsing code below — was undefined; canonical names restored.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.

PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True )
class Protein:
    """Immutable protein structure representation.

    NOTE(review): every field of the original dataclass was annotated under the
    same mangled name ``UpperCamelCase_`` (so only the final field survived),
    the class itself was not named ``Protein`` although the parser below
    constructs ``Protein(...)``, and ``frozen=snake_case__`` referenced an
    undefined name. Canonical field names (matching the keyword arguments used
    at the construction site) are restored.
    """

    # Cartesian coordinates of atoms in angstroms. [num_res, num_atom_type, 3]
    atom_positions: np.ndarray

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'. [num_res]
    aatype: np.ndarray

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    # [num_res, num_atom_type]
    atom_mask: np.ndarray

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    # [num_res]
    residue_index: np.ndarray

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value. [num_res, num_atom_type]
    b_factors: np.ndarray

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> "Protein":
    """Parse a ProteinNet-format string into a :class:`Protein`.

    Handles the ``[PRIMARY]`` (sequence), ``[TERTIARY]`` (N/CA/C coordinates,
    picometers -> angstroms) and ``[MASK]`` sections. The original mangled
    version also tried to assign into an immutable string when replacing
    unknown residue symbols; the sequence is now rebuilt instead.
    """
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str ) if len(tag ) > 0]
    groups = zip(tags[0::2], [l.split('\n' ) for l in tags[1::2]] )

    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            # Strings are immutable: rebuild the sequence, substituting "X"
            # for any symbol that is not a standard residue type.
            seq = "".join(res if res in residue_constants.restypes else "X" for res in seq )
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3 ):
                tertiary.append(list(map(float, g[1][axis].split() ) ) )
            tertiary_np = np.array(tertiary )
            atom_positions = np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.float32 )
            # The file stores x/y/z for N, CA, C interleaved; de-interleave
            # into the per-atom slots of the full atom table.
            for i, atom in enumerate(atoms ):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip() ) ) )
            atom_mask = np.zeros(
                (
                    len(mask ),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32 )
            for i, atom in enumerate(atoms ):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions, atom_mask=atom_mask, aatype=aatype, residue_index=np.arange(len(aatype ) ), b_factors=None, )
def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : int = 0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
lowerCAmelCase__ = prot.parents
lowerCAmelCase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCAmelCase__ = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
lowerCAmelCase__ = ['''N/A''']
pdb_headers.append(f'PARENT {" ".join(lowercase__ )}' )
return pdb_headers
def add_pdb_headers(prot: "Protein", pdb_str: str) -> str:
    """Add REMARK/PARENT header lines to an existing PDB string.

    Useful when the PDB text was produced by a tool that does not emit these
    records itself. Existing PARENT/REMARK lines in `pdb_str` are dropped and
    re-emitted per chain (a fresh PARENT line is inserted after each TER that
    starts a new chain).

    Args:
        prot: Protein supplying `remark`, `parents`, `parents_chain_index`.
        pdb_str: The PDB text to augment.

    Returns:
        The augmented PDB string.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, then lay them out densely
            # from chain 0 to the maximum index seen ("N/A" for gaps).
            parent_dict = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            # A new chain starts after this TER: emit its PARENT line.
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: "Protein") -> str:
    """Convert a `Protein` instance to a PDB-format string.

    Args:
        prot: The protein to convert.

    Returns:
        The PDB string, including header lines, one ATOM line per resolved
        atom, a TER line per chain, and a final END line.

    Raises:
        ValueError: If any aatype index is outside the known residue types.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        # Unknown residue indices map to UNK.
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            # NOTE(review): the multi-space runs below follow the wwPDB ATOM
            # record layout; confirm against the upstream source, since the
            # original literal's whitespace looked collapsed.
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: "Protein") -> np.ndarray:
    """Compute the ideal (all-atoms-present) atom mask for `prot`.

    `Protein.atom_mask` typically encodes which atoms were actually resolved
    experimentally; this instead returns the mask of atoms each residue type
    *should* have, looked up from the standard atom table.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """Assemble a `Protein` from model features and a prediction result.

    Args:
        features: Input feature dict (needs "aatype" and "residue_index").
        result: Model output dict (needs "final_atom_positions" and
            "final_atom_mask").
        b_factors: Optional per-atom B-factors; zeros when omitted.
        chain_index: Optional per-residue chain indices.
        remark: Optional free-text remark for PDB headers.
        parents: Optional template parent identifiers.
        parents_chain_index: Optional chain index for each parent.

    Returns:
        The assembled `Protein` (residue_index is shifted to be 1-based, as
        PDB numbering expects).
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
# --- (removed non-code artifact: dataset chunk delimiter) ---
"""simple docstring"""
from math import factorial
def binomial_distribution(successes, trials, prob):
    """Return the binomial pmf P(X = successes) for X ~ Binomial(trials, prob).

    Args:
        successes: Number of successful outcomes (non-negative int).
        trials: Number of independent trials (non-negative int).
        prob: Probability of success on a single trial, strictly in (0, 1).

    Returns:
        The probability as a float.

    Raises:
        ValueError: If the arguments are out of range or not integers.
    """
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # NOTE(review): "trails" below is presumably a typo for "trials" — left
    # untouched here since it is runtime output.
    print('''Probability of 2 successes out of 4 trails''')
    print('''with probability of 0.75 is:''', end=''' ''')
    # NOTE(review): this file's function is not defined under the name
    # `binomial_distribution` as written — confirm the intended callee.
    print(binomial_distribution(2, 4, 0.75))
# --- (removed non-code artifact: dataset chunk delimiter) ---
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Pytest hook: register the custom markers used across the test suite so
    runs with `--strict-markers` don't fail on unknown markers.

    Note: pytest calls hooks by keyword, so the parameter must be named
    `config`.
    """
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")
def pytest_addoption(parser):
    """Pytest hook: forward CLI-option registration to the shared helper.

    Kept as a local import so pytest can collect this conftest without
    importing transformers eagerly. pytest calls hooks by keyword, so the
    parameter must be named `parser`.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Pytest hook: write detailed test reports when invoked with
    `--make-reports=<id>`.

    pytest calls hooks by keyword, so the parameter must be named
    `terminalreporter`.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """Pytest hook: treat an empty test session (exit status 5, "no tests
    collected") as success so CI jobs that filter out every test still pass.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

# Keep a reference to the stock checker so the custom subclass below can
# delegate to it.
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(doctest.OutputChecker):
    """Doctest output checker that honors the custom IGNORE_RESULT flag.

    When an example carries IGNORE_RESULT, any output is accepted, so the
    example is effectively run only for its side effects. The override must
    be named `check_output` for doctest to call it.
    """

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return doctest.OutputChecker.check_output(self, want, got, optionflags)
# Route doctest collection through the HF-specific checker, module and parser
# so custom behavior (e.g. IGNORE_RESULT) applies suite-wide.
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
# --- (removed non-code artifact: dataset chunk delimiter) ---
# Version specifiers for each (optional) dependency, keyed by distribution
# name. Consumed by the dependency-version checking utilities at setup and
# runtime; keep specifiers in sync with setup.py.
_SCREAMING_SNAKE_CASE : Optional[Any] = {
    "Pillow": "Pillow<10.0.0",
    "accelerate": "accelerate>=0.20.3",
    "av": "av==9.2.0",
    "beautifulsoup4": "beautifulsoup4",
    "black": "black~=23.1",
    "codecarbon": "codecarbon==1.2.0",
    "cookiecutter": "cookiecutter==1.7.3",
    "dataclasses": "dataclasses",
    "datasets": "datasets!=2.5.0",
    "decord": "decord==0.6.0",
    "deepspeed": "deepspeed>=0.9.3",
    "diffusers": "diffusers",
    "dill": "dill<0.3.5",
    "evaluate": "evaluate>=0.2.0",
    "fairscale": "fairscale>0.3",
    "faiss-cpu": "faiss-cpu",
    "fastapi": "fastapi",
    "filelock": "filelock",
    "flax": "flax>=0.4.1,<=0.7.0",
    "ftfy": "ftfy",
    "fugashi": "fugashi>=1.0",
    "GitPython": "GitPython<3.1.19",
    "hf-doc-builder": "hf-doc-builder>=0.3.0",
    "huggingface-hub": "huggingface-hub>=0.14.1,<1.0",
    "importlib_metadata": "importlib_metadata",
    "ipadic": "ipadic>=1.0.0,<2.0",
    "isort": "isort>=5.5.4",
    "jax": "jax>=0.2.8,!=0.3.2,<=0.4.13",
    "jaxlib": "jaxlib>=0.1.65,<=0.4.13",
    "jieba": "jieba",
    "kenlm": "kenlm",
    "keras-nlp": "keras-nlp>=0.3.1",
    "librosa": "librosa",
    "nltk": "nltk",
    "natten": "natten>=0.14.6",
    "numpy": "numpy>=1.17",
    "onnxconverter-common": "onnxconverter-common",
    "onnxruntime-tools": "onnxruntime-tools>=1.4.2",
    "onnxruntime": "onnxruntime>=1.4.0",
    "opencv-python": "opencv-python",
    "optuna": "optuna",
    "optax": "optax>=0.0.8,<=0.1.4",
    "packaging": "packaging>=20.0",
    "parameterized": "parameterized",
    "phonemizer": "phonemizer",
    "protobuf": "protobuf",
    "psutil": "psutil",
    "pyyaml": "pyyaml>=5.1",
    "pydantic": "pydantic<2",
    "pytest": "pytest>=7.2.0",
    "pytest-timeout": "pytest-timeout",
    "pytest-xdist": "pytest-xdist",
    "python": "python>=3.8.0",
    "ray[tune]": "ray[tune]",
    "regex": "regex!=2019.12.17",
    "requests": "requests",
    "rhoknp": "rhoknp>=1.1.0,<1.3.1",
    "rjieba": "rjieba",
    "rouge-score": "rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1",
    "ruff": "ruff>=0.0.241,<=0.0.259",
    "sacrebleu": "sacrebleu>=1.4.12,<2.0.0",
    "sacremoses": "sacremoses",
    "safetensors": "safetensors>=0.3.1",
    "sagemaker": "sagemaker>=2.31.0",
    "scikit-learn": "scikit-learn",
    "sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
    "sigopt": "sigopt",
    "starlette": "starlette",
    "sudachipy": "sudachipy>=0.6.6",
    "sudachidict_core": "sudachidict_core>=20220729",
    "tensorflow-cpu": "tensorflow-cpu>=2.6,<2.14",
    "tensorflow": "tensorflow>=2.6,<2.14",
    "tensorflow-text": "tensorflow-text<2.14",
    "tf2onnx": "tf2onnx",
    "timeout-decorator": "timeout-decorator",
    "timm": "timm",
    "tokenizers": "tokenizers>=0.11.1,!=0.11.3,<0.14",
    "torch": "torch>=1.9,!=1.12.0",
    "torchaudio": "torchaudio",
    "torchvision": "torchvision",
    "pyctcdecode": "pyctcdecode>=0.4.0",
    "tqdm": "tqdm>=4.27",
    "unidic": "unidic>=1.0.2",
    "unidic_lite": "unidic_lite>=1.0.7",
    "urllib3": "urllib3<2.0.0",
    "uvicorn": "uvicorn",
}
# --- (removed non-code artifact: dataset chunk delimiter) ---
def odd_even_transposition(arr: list) -> list:
    """Sort `arr` in place (and return it) using odd-even transposition sort.

    Alternates between comparing even-indexed and odd-indexed adjacent pairs
    for `len(arr)` passes, which guarantees the list is sorted. O(n^2) time,
    O(1) extra space.

    Args:
        arr: The list to sort (mutated in place).

    Returns:
        The same list, sorted ascending.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # Even passes start at index 0, odd passes at index 1.
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i + 1], arr[i] = arr[i], arr[i + 1]
    return arr
if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    # NOTE: `{arr}` is formatted before the in-place sort call runs, so the
    # "Original" value prints correctly despite the mutation.
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
# --- (removed non-code artifact: dataset chunk delimiter) ---
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): the assignment target looks mangled — upstream presumably set
# an environment toggle (e.g. os.environ["TOKENIZERS_PARALLELISM"]) to "true";
# confirm against the original script before relying on this.
UpperCAmelCase_ : Tuple = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return everything needed for a basic regression run: the reference
    model, its accelerator-prepared copy, and a prepared dataloader."""
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    """Build a tokenized GLUE/MRPC validation dataloader.

    Args:
        accelerator: Accelerator whose `main_process_first` guard wraps the
            (cached) dataset map step.
        use_longest: Pad each batch to its longest member instead of to a
            fixed max_length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build baseline ("no") and distributed ("ddp") MRPC setups sharing one
    pretrained classifier, plus the accelerator that prepared them."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader`, gather per-batch (logits, targets) via
    the accelerator, and return them concatenated along dim 0."""
    logits_and_targets = []
    for batch in dataloader:
        inp, target = batch.values()
        with torch.no_grad():
            logit = model(inp)
            # Gather so every process sees the full (deduplicated) batch.
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gathering predictions yields exactly `num_samples` rows
    (i.e. no duplicated or dropped samples across processes)."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Verify distributed metric computation matches the single-process
    baseline on GLUE/MRPC for the given batch-handling options."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    """Run the metric-correctness checks across all combinations of
    `split_batches` / `dispatch_batches`, quieting logs off the main
    process."""
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    """Per-process entry point for TPU spawning (index is the process rank,
    required by the spawn API but unused here)."""
    main()
if __name__ == "__main__":
    # Script entry point: run all metric correctness checks.
    main()
# --- (removed non-code artifact: dataset chunk delimiter) ---
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester(object):
    """Builds a tiny random DistilBert config plus inputs and checks that
    each task head produces outputs of the expected shape."""

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random input ids, optional attention mask/labels, and a
        small config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DistilBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test-suite wiring for DistilBert."""

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class DistilBertModelIntergrationTest(unittest.TestCase):
    """Slow integration check against the released distilbert-base-uncased
    checkpoint."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from a known-good run of the checkpoint.
        expected_slice = torch.tensor(
            [[[-0.1_639, 0.3_299, 0.1_648], [-0.1_746, 0.3_289, 0.1_710], [-0.1_884, 0.3_357, 0.1_810]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
# --- (removed non-code artifact: dataset chunk delimiter) ---
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401

# Emit the standard diffusers deprecation warning at import time, pointing
# users of this module at the new top-level import location.
deprecate(
    '''stable diffusion controlnet''',
    '''0.22.0''',
    '''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
    standard_warn=False,
    stacklevel=3,
)
# --- (removed non-code artifact: dataset chunk delimiter) ---
from typing import Any
def lowerCAmelCase_ (
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Viterbi algorithm: return the most likely sequence of hidden states for the
    given observation sequence.

    Fix: the obfuscated original gave every parameter the same name (a
    SyntaxError) and renamed all helper functions to ``lowerCAmelCase_`` while
    the bodies still called ``_validation`` / ``_validate_*`` (NameError).  The
    intended names are restored from the references inside the function bodies.

    :param observations_space: ordered list of observation labels (strings)
    :param states_space: list of hidden-state labels (strings)
    :param initial_probabilities: state -> probability of starting in that state
    :param transition_probabilities: state -> {state -> probability}
    :param emission_probabilities: state -> {observation -> probability}
    :raises ValueError: if any argument is empty or has the wrong structure
    :return: most likely state sequence, one state per observation
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities = {}
    pointers = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> None:
    """Validate all Viterbi arguments, raising ValueError on the first problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> None:
    """Raise ValueError if any argument is empty or falsy."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError('''There\'s an empty parameter''')


def _validate_lists(observations_space: list, states_space: list) -> None:
    """Validate that both spaces are lists of strings."""
    _validate_list(observations_space, '''observations_space''')
    _validate_list(states_space, '''states_space''')


def _validate_list(_object: list, var_name: str) -> None:
    """Raise ValueError unless *_object* is a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f'{var_name} must be a list')
    else:
        for x in _object:
            if not isinstance(x, str):
                raise ValueError(f'{var_name} must be a list of strings')


def _validate_dicts(
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> None:
    """Validate the three probability tables."""
    _validate_dict(initial_probabilities, '''initial_probabilities''', float)
    _validate_nested_dict(transition_probabilities, '''transition_probabilities''')
    _validate_nested_dict(emission_probabilities, '''emission_probabilities''')


def _validate_nested_dict(_object: dict, var_name: str) -> None:
    """Validate a dict of dicts whose inner values must be floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object: dict, var_name: str, value_type: type, nested: bool = False) -> None:
    """Raise ValueError unless *_object* is a dict with str keys and *value_type* values."""
    if not isinstance(_object, dict):
        raise ValueError(f'{var_name} must be a dict')
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f'{var_name} all keys must be strings')
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = '''nested dictionary ''' if nested else ''''''
        raise ValueError(f'{var_name} {nested_text}all values must be {value_type.__name__}')


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 668 | 0 |
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    """Builds a tiny ``EsmConfig`` plus random inputs and runs shape checks for TF-ESM models.

    Fix: the obfuscated original bound every hyperparameter and local to a single
    throwaway name (``A__``) instead of distinct ``self`` attributes / locals, so
    ``self.batch_size`` etc. were never set and every method failed.  Names are
    restored from the references visible in the method bodies and the call site
    (``TFEsmModelTester(self)`` in the test class below).
    """

    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def prepare_config_and_inputs_for_decoder(self):
        """Like ``prepare_config_and_inputs`` but adds encoder outputs for cross-attention."""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        # NOTE(review): the obfuscated line here was a bare ``A__ = True``;
        # ``config.is_decoder`` matches the upstream TF-ESM tester — confirm.
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        """Run the base model with dict, list and plain-tensor inputs; check output shape."""
        model = TFEsmModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        """Run the model as a decoder with cross-attention inputs; check output shape."""
        # NOTE(review): obfuscated ``A__ = True``; ``add_cross_attention`` matches upstream — confirm.
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''encoder_hidden_states''': encoder_hidden_states,
            '''encoder_attention_mask''': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the masked-LM head produces vocab-sized logits."""
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Check the token-classification head produces num_labels-sized logits."""
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


# Backwards-compatible alias for the obfuscated class name.
SCREAMING_SNAKE_CASE__ = TFEsmModelTester
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for the TF-ESM family.

    Fix: the obfuscated original used undefined base names, collapsed every class
    attribute / local to one identifier, and stripped the ``test_`` prefixes so
    unittest collected nothing.  Names restored from the visible references
    (e.g. ``self.all_model_classes``, ``self.model_tester``) and the imports at
    the top of this module.
    """

    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFEsmModel,
            '''fill-mask''': TFEsmForMaskedLM,
            '''text-classification''': TFEsmForSequenceClassification,
            '''token-classification''': TFEsmForTokenClassification,
            '''zero-shot''': TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): the two boolean flags were anonymised in the original;
    # ``test_head_masking`` / ``test_onnx`` match the upstream module — confirm.
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('''Protein models do not support embedding resizing.''')
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the pretrained ``facebook/esm2_t6_8M_UR50D`` checkpoint.

    Fix: locals were collapsed to one name in the obfuscated original, so
    ``model`` / ``output`` / ``expected_slice`` were undefined at their use
    sites; distinct names are restored from the references in the bodies.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('''facebook/esm2_t6_8M_UR50D''')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1E-4))
| 190 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger; restored name so ``logger.warning`` in the class below resolves.
logger = logging.get_logger(__name__)
# Backwards-compatible alias for the previous (obfuscated) module-level binding.
_UpperCAmelCase = logger
class lowerCAmelCase_ ( SequenceFeatureExtractor ):
    r"""TVLT feature extractor: turns raw audio into padded log-mel spectrogram patches.

    Fix: the obfuscated original gave every ``__init__`` parameter the same name
    (a SyntaxError), inherited from the undefined ``snake_case__`` and collapsed
    all attribute assignments into one local.  The base class, parameter and
    attribute names are restored from the imports and the references in the
    method bodies (e.g. ``self.patch_size``, ``self._np_extract_fbank_features``).
    """

    # Names of the tensors this extractor returns.
    model_input_names = ['''audio_values''', '''audio_mask''']

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],  # NOTE(review): mutable default kept for upstream compatibility; it is only read.
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22_050.0,
            sampling_rate=sampling_rate,
            norm='''slaney''',
            mel_scale='''slaney''',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute a log-mel spectrogram for *waveform*, rescaled into roughly [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, '''hann'''),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='''dB''',
            db_range=80.0,
        )
        # Drop the last frame, then map the [-80, 0] dB range into [-1, 1].
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        """Featurize one waveform or a batch of waveforms into padded spectrogram tensors.

        Returns a :class:`BatchFeature` with ``audio_values`` and (optionally)
        ``audio_mask``.  Raises ValueError on a sampling-rate mismatch or
        multi-channel input.  (``resample`` / ``mask_audio`` are unused in this
        body — names reconstructed from the upstream extractor; confirm.)
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    '''This feature extractor is set to support sampling rate'''
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.')
        else:
            logger.warning(
                '''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
                '''Failing to do so can result in silent errors that might be hard to debug.''')

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            # Downcast double-precision input once at the boundary.
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask}
        else:
            data = {'''audio_values''': padded_audio_features}
        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668 | 0 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    """Tests for tokenizer loading behaviour (offline cache fallback, legacy loading).

    Fix: the obfuscated original bound every local to ``A_`` (so ``response_mock``,
    ``tmp_file`` and ``tokenizer`` were undefined at their use sites), stripped the
    ``test_`` prefixes so unittest collected nothing, and gave all three classes in
    this module the same name so two of them were shadowed.  Names restored from
    the visible references.
    """

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    @require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPTaTokenizerFast.from_pretrained('gpt2')

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('requests.Session.request', return_value=response_mock) as mock_head:
            _ = GPTaTokenizerFast.from_pretrained('gpt2')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_one_file(self):
        # This test is for deprecated behavior and can be removed in v5
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, 'wb') as f:
                http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model', f)
            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile('tokenizer.json'):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open('tokenizer.json', 'wb') as f:
                http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json', f)
            tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1_000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove('tokenizer.json')

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    """Staging-server tests for ``push_to_hub`` on slow/fast/custom tokenizers.

    Fix: the obfuscated original bound every local to ``A_`` (``vocab_file``,
    ``tokenizer``, ``new_tokenizer`` were undefined at their use sites), dropped
    ``cls._token`` / the ``setUpClass``/``tearDownClass`` hooks, and stripped the
    ``test_`` prefixes.  Names restored from the visible references.
    """

    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token, repo_id='test-tokenizer')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='valid_org/test-tokenizer-org')
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id='test-dynamic-tokenizer')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('test-tokenizer', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='test-tokenizer')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id='test-tokenizer', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(F'{USER}/test-tokenizer')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub('valid_org/test-tokenizer-org', use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id='valid_org/test-tokenizer-org')

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id='valid_org/test-tokenizer-org', push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, 'vocab.txt')
            with open(vocab_file, 'w', encoding='utf-8') as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub('test-dynamic-tokenizer', use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(F'{USER}/test-dynamic-tokenizer', trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizerFast')
        tokenizer = AutoTokenizer.from_pretrained(
            F'{USER}/test-dynamic-tokenizer', use_fast=False, trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, 'CustomTokenizer')
class TrieTest(unittest.TestCase):
    """Unit tests for the tokenizer ``Trie`` helper.

    Fix: the obfuscated original bound every ``Trie()`` instance to ``A_`` while
    the method bodies called ``trie.add`` / ``trie.split`` (NameError), and the
    ``test_`` prefixes were stripped so unittest collected nothing.
    """

    def test_trie(self):
        trie = Trie()
        trie.add('Hello 友達')
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
        trie.add('Hello')
        trie.data
        self.assertEqual(trie.data, {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS] This is a extra_id_100'])
        trie.add('[CLS]')
        trie.add('extra_id_1')
        trie.add('extra_id_100')
        self.assertEqual(trie.split('[CLS] This is a extra_id_100'), ['[CLS]', ' This is a ', 'extra_id_100'])

    def test_trie_single(self):
        trie = Trie()
        trie.add('A')
        self.assertEqual(trie.split('ABC'), ['A', 'BC'])
        self.assertEqual(trie.split('BCA'), ['BC', 'A'])

    def test_trie_final(self):
        trie = Trie()
        trie.add('TOKEN]')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add('A')
        trie.add('P')
        trie.add('[SPECIAL_TOKEN]')
        self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]'), ['This is something ', '[SPECIAL_TOKEN]'])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add('AB')
        trie.add('B')
        trie.add('C')
        self.assertEqual(trie.split('ABC'), ['AB', 'C'])

    def test_trie_skip(self):
        trie = Trie()
        trie.add('ABC')
        trie.add('B')
        trie.add('CD')
        self.assertEqual(trie.split('ABCD'), ['ABC', 'D'])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string
        # parts.
        trie = Trie()
        parts = trie.cut_text('ABC', [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ['AB', 'C'])
| 366 |
from collections import namedtuple
# Named pair holding the factors to convert a unit to / from cubic metres.
# Fix: the obfuscated original assigned both the namedtuple and the conversion
# table to the same throwaway name, so ``from_to`` and ``METRIC_CONVERSION``
# (both referenced below) raised NameError.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}


def lowerCAmelCase_ (value: float, from_type: str, to_type: str) -> float:
    """Convert a volume *value* between the units in ``METRIC_CONVERSION``.

    The conversion goes through cubic metres: first scale from *from_type* into
    cubic metres, then into *to_type*.

    :raises ValueError: if either unit name is not a key of ``METRIC_CONVERSION``
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION))
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ''', '''.join(METRIC_CONVERSION))
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 0 |
"""simple docstring"""
import functools
def _SCREAMING_SNAKE_CASE ( _lowercase : str , _lowercase : str ) ->int:
'''simple docstring'''
a : Tuple = len(lowercase__ )
a : int = len(lowercase__ )
@functools.cache
def min_distance(_lowercase : int , _lowercase : int ) -> int:
# if first word index is overflow - delete all from the second word
if indexa >= len_worda:
return len_worda - indexa
# if second word index is overflow - delete all from the first word
if indexa >= len_worda:
return len_worda - indexa
a : str = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , lowercase__ ) , 1 + min_distance(lowercase__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 633 |
def lowerCAmelCase_ (collection: list) -> list:
    """Sort *collection* in place with binary insertion sort and return it.

    For each element, a binary search over the already-sorted prefix locates
    the insertion point, then the tail is shifted one slot right.

    Fix: the obfuscated original passed the list itself as the ``range`` bounds
    of the shift loop (TypeError) and dropped the final write-back into the
    list, so the input was never actually sorted.  The shift loop bounds
    (``i`` down to ``low``) and ``collection[low] = val`` are restored.
    """
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the insertion point for ``val`` in collection[:i].
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                # On equality we move right, keeping the sort stable.
                low = mid + 1
        # Shift the tail one slot right and drop ``val`` into place.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowerCAmelCase_(unsorted))
| 668 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def A ( __UpperCamelCase ):
    """Build the ``DPTConfig`` (hybrid) and expected output shape for a checkpoint URL.

    Fix: the obfuscated original assigned every value to one throwaway local, so
    the config was never populated and ``idalabel`` raised NameError.  Config
    attribute names are reconstructed from the upstream DPT conversion script —
    verify against ``DPTConfig`` before relying on them.

    :param __UpperCamelCase: URL of the original DPT checkpoint
    :return: ``(config, expected_shape)``
    """
    checkpoint_url = __UpperCamelCase
    config = DPTConfig(embedding_type='hybrid')

    if "large" in checkpoint_url:
        config.hidden_size = 1_024
        config.intermediate_size = 4_096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1_024, 1_024]
        expected_shape = (1, 384, 384)

    # NOTE(review): '"nyu" or "midas" in checkpoint_url' is always truthy ("nyu" is a
    # non-empty string); the intent was probably
    # '"nyu" in checkpoint_url or "midas" in checkpoint_url'.
    # Kept as-is to preserve the original behaviour.
    if "nyu" or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = 'project'

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = 'huggingface/label-files'
        filename = 'ade20k-id2label.json'
        # Fetch the ADE20K label mapping from the hub and attach it to the config.
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type='dataset')), 'r'))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def A ( __UpperCamelCase ) -> None:
    """Remove the pretraining-head keys from a checkpoint state dict, in place.

    Fix: the obfuscated original referenced ``state_dict`` / ``lowercase__``,
    neither of which was bound (the parameter had been renamed), so every call
    raised NameError.  Also corrects the declared return type: the function
    mutates its argument and returns None.
    """
    state_dict = __UpperCamelCase
    ignore_keys = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
    for k in ignore_keys:
        # pop with default so a key missing from the checkpoint is not an error
        state_dict.pop(k, None)
def rename_key(name):
    """Translate a timm/DPT state-dict key into the HuggingFace DPT naming scheme.

    Bug fix: every `replace` result was assigned to a throwaway variable,
    so the input was returned unchanged; the result must be rebound to
    `name` so later rules see the updated key.
    """
    if (
        "pretrained.model" in name
        and "cls_token" not in name
        and "pos_embed" not in name
        and "patch_embed" not in name
    ):
        name = name.replace("pretrained.model", "dpt.encoder")
    if "pretrained.model" in name:
        name = name.replace("pretrained.model", "dpt.embeddings")
    if "patch_embed" in name:
        name = name.replace("patch_embed", "")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "position_embeddings")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "proj" in name and "project" not in name:
        name = name.replace("proj", "projection")
    if "blocks" in name:
        name = name.replace("blocks", "layer")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "norm1" in name and "backbone" not in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name and "backbone" not in name:
        name = name.replace("norm2", "layernorm_after")
    if "scratch.output_conv" in name:
        name = name.replace("scratch.output_conv", "head")
    if "scratch" in name:
        name = name.replace("scratch", "neck")
    if "layer1_rn" in name:
        name = name.replace("layer1_rn", "convs.0")
    if "layer2_rn" in name:
        name = name.replace("layer2_rn", "convs.1")
    if "layer3_rn" in name:
        name = name.replace("layer3_rn", "convs.2")
    if "layer4_rn" in name:
        name = name.replace("layer4_rn", "convs.3")
    if "refinenet" in name:
        layer_idx = int(name[len("neck.refinenet") : len("neck.refinenet") + 1])
        # tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
        name = name.replace(f"refinenet{layer_idx}", f"fusion_stage.layers.{abs(layer_idx-4)}")
    if "out_conv" in name:
        name = name.replace("out_conv", "projection")
    if "resConfUnit1" in name:
        name = name.replace("resConfUnit1", "residual_layer1")
    if "resConfUnit2" in name:
        name = name.replace("resConfUnit2", "residual_layer2")
    if "conv1" in name:
        name = name.replace("conv1", "convolution1")
    if "conv2" in name:
        name = name.replace("conv2", "convolution2")
    # readout blocks
    if "pretrained.act_postprocess1.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess1.0.project.0", "neck.reassemble_stage.readout_projects.0.0")
    if "pretrained.act_postprocess2.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess2.0.project.0", "neck.reassemble_stage.readout_projects.1.0")
    if "pretrained.act_postprocess3.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess3.0.project.0", "neck.reassemble_stage.readout_projects.2.0")
    if "pretrained.act_postprocess4.0.project.0" in name:
        name = name.replace("pretrained.act_postprocess4.0.project.0", "neck.reassemble_stage.readout_projects.3.0")
    # resize blocks
    if "pretrained.act_postprocess1.3" in name:
        name = name.replace("pretrained.act_postprocess1.3", "neck.reassemble_stage.layers.0.projection")
    if "pretrained.act_postprocess1.4" in name:
        name = name.replace("pretrained.act_postprocess1.4", "neck.reassemble_stage.layers.0.resize")
    if "pretrained.act_postprocess2.3" in name:
        name = name.replace("pretrained.act_postprocess2.3", "neck.reassemble_stage.layers.1.projection")
    if "pretrained.act_postprocess2.4" in name:
        name = name.replace("pretrained.act_postprocess2.4", "neck.reassemble_stage.layers.1.resize")
    if "pretrained.act_postprocess3.3" in name:
        name = name.replace("pretrained.act_postprocess3.3", "neck.reassemble_stage.layers.2.projection")
    if "pretrained.act_postprocess4.3" in name:
        name = name.replace("pretrained.act_postprocess4.3", "neck.reassemble_stage.layers.3.projection")
    if "pretrained.act_postprocess4.4" in name:
        name = name.replace("pretrained.act_postprocess4.4", "neck.reassemble_stage.layers.3.resize")
    if "pretrained" in name:
        name = name.replace("pretrained", "dpt")
    if "bn" in name:
        name = name.replace("bn", "batch_norm")
    if "head" in name:
        name = name.replace("head", "head.head")
    if "encoder.norm" in name:
        name = name.replace("encoder.norm", "layernorm")
    if "auxlayer" in name:
        name = name.replace("auxlayer", "auxiliary_head.head")
    if "backbone" in name:
        name = name.replace("backbone", "backbone.bit.encoder")
    if ".." in name:
        name = name.replace("..", ".")
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "convolution" in name and "backbone" in name:
        name = name.replace("convolution", "conv")
    if "layer" in name and "backbone" in name:
        name = name.replace("layer", "layers")
    if "backbone.bit.encoder.bit" in name:
        name = name.replace("backbone.bit.encoder.bit", "backbone.bit")
    if "embedder.conv" in name:
        name = name.replace("embedder.conv", "embedder.convolution")
    if "backbone.bit.encoder.stem.norm" in name:
        name = name.replace("backbone.bit.encoder.stem.norm", "backbone.bit.embedder.norm")
    return name
def read_in_q_k_v(state_dict, config):
    """Split each timm fused qkv projection into separate query/key/value tensors (in place).

    Bug fix: the original had two identically named parameters (SyntaxError)
    and discarded the per-slice tensors instead of writing them back under
    their HuggingFace keys.
    """
    for i in range(config.num_hidden_layers):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"dpt.encoder.layer.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[: config.hidden_size, :]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"dpt.encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Bug fix: the URL and stream flag were referenced through an undefined name.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_dpt_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub, model_name, show_prediction):
    """Convert the original DPT checkpoint at *checkpoint_url* to HuggingFace format.

    Bug fix: the original signature declared five identically named
    parameters (SyntaxError) and referenced every value through the
    undefined name `lowercase__`.
    """
    config, expected_shape = get_dpt_config(checkpoint_url)
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url, map_location="cpu")
    # remove certain keys
    remove_ignore_keys_(state_dict)
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict, config)
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config) if "ade" in checkpoint_url else DPTForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Check outputs on an image
    size = 480 if "ade" in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size)
    image = prepare_img()
    encoding = image_processor(image, return_tensors="pt")
    # forward pass
    outputs = model(**encoding).logits if "ade" in checkpoint_url else model(**encoding).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1), size=(image.size[1], image.size[0]), mode="bicubic", align_corners=False
            )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model.push_to_hub("ybelkada/dpt-hybrid-midas")
        image_processor.push_to_hub("ybelkada/dpt-hybrid-midas")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt",
        type=str,
        help="URL of the original DPT checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=str,
        required=False,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
    )
    parser.add_argument(
        "--model_name",
        default="dpt-large",
        type=str,
        help="Name of the model, in case you're pushing to the hub.",
    )
    parser.add_argument(
        "--show_prediction",
        action="store_true",
    )

    # Bug fix: the parsed namespace was stored under an obfuscated name while
    # the call below read the undefined `args`.
    args = parser.parse_args()
    convert_dpt_checkpoint(
        args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
| 9 |
def match_pattern(input_string: str, pattern: str) -> bool:
    """Match *input_string* against *pattern* supporting '.' (any char) and '*'.

    Bottom-up dynamic programming; the ``__main__`` block below calls this
    as ``match_pattern`` (the obfuscated def name and the duplicate
    parameter names made the original a SyntaxError).
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    # Bug fix: the sample values were stored under obfuscated names while
    # the f-strings below read the undefined `input_string` / `pattern`.
    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if match_pattern(input_string, pattern):
        print(f"{input_string} matches the given pattern {pattern}")
    else:
        print(f"{input_string} does not match with the given pattern {pattern}")
| 668 | 0 |
"""simple docstring"""
def odd_even_transposition(arr: list) -> list:
    """Sort *arr* in place with odd-even transposition (brick) sort and return it.

    Bug fix: the body referenced the undefined names `lowercase__` and
    `arr`, and the swap result was discarded; the def is renamed to match
    the ``__main__`` call site.
    """
    arr_size = len(arr)
    for _ in range(arr_size):
        # alternate between even-indexed and odd-indexed neighbour pairs
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Bug fix: the list was stored under an obfuscated name while the
    # f-string below read the undefined `arr`.
    arr = list(range(1_0, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 159 |
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Resource names referenced by the tokenizer class in this module; the
# obfuscated version assigned them to throwaway names, leaving
# `logger`, `VOCAB_FILES_NAMES`, etc. undefined.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class lowerCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer (MGP-STR style) backed by a JSON vocab file.

    Bug fixes: the base class was an undefined obfuscated name (the file
    imports `PreTrainedTokenizer` for this purpose), every method shared one
    obfuscated name (shadowing each other), and `__init__` declared duplicate
    parameter names (SyntaxError). Method names follow the
    `PreTrainedTokenizer` subclass contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # reverse mapping id -> token for decoding
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text: str):
        # one token per character
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write the vocab JSON into *save_directory* and return its path."""
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
| 668 | 0 |
def binomial_coefficient(n: int, k: int) -> int:
    """Return C(n, k) via the multiplicative formula.

    Renamed to match the in-file call sites (the original def name shadowed
    its siblings and the duplicate parameter names were a SyntaxError).
    """
    result = 1  # To kept the Calculated Value
    # Since C(n, k) = C(n, n-k), loop over the smaller of the two.
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result
def catalan_number(node_count: int) -> int:
    """Return the node_count-th Catalan number: C(2n, n) // (n + 1)."""
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)
def factorial(n: int) -> int:
    """Return n! for n >= 0; raise ValueError for negative n."""
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result
def binary_tree_count(node_count: int) -> int:
    """Number of distinct binary trees on *node_count* labeled nodes: Catalan(n) * n!."""
    return catalan_number(node_count) * factorial(node_count)
if __name__ == "__main__":
    # Bug fix: the node count was stored under an obfuscated name while the
    # code below read the undefined `node_count`.
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
| 188 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the DistilBERT family: submodules are imported
# only on first access, and optional backends (tokenizers / torch / tf /
# flax) are registered only when available. Bug fix: every assignment
# targeted a throwaway name, so `_import_structure` was never defined and
# the `_LazyModule` was never installed into `sys.modules`.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves names on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
import random
def __UpperCamelCase (lowerCAmelCase : list, lowerCAmelCase : int ) -> tuple:
A , A , A = [], [], []
for element in data:
if element < pivot:
less.append(lowercase__ )
elif element > pivot:
greater.append(lowercase__ )
else:
equal.append(lowercase__ )
return less, equal, greater
def quick_select(items: list, index: int):
    """Return the element that would sit at *index* if *items* were sorted.

    Quickselect with a random pivot; returns None when *index* is out of
    range. Renamed to match its own recursive call sites.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    count = 0
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
| 699 |
from collections import deque
class Process:
    """A schedulable process with the bookkeeping fields used by the MLFQ simulator.

    Renamed from the obfuscated class name to match the ``__main__`` call
    sites; the original ``__init__`` also declared duplicate parameter names
    (SyntaxError) and never stored anything on ``self``.
    """

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue scheduler.

    The first ``number_of_queues - 1`` queues run round-robin with the given
    time slices; the final queue runs first-come-first-served. Renamed from
    the obfuscated class name to match the ``__main__`` call sites; every
    method body also had its assignment targets destroyed.
    """

    def __init__(
        self, number_of_queues: int, time_slices: "list[int]", queue: "deque[Process]", current_time: int
    ) -> None:
        # total number of mlfq's queues
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue = deque()

    def calculate_sequence_of_finish_queue(self) -> "list[str]":
        """Return the names of finished processes in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process total ready-queue waiting times."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process arrival-to-completion times."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: "list[Process]") -> "list[int]":
        """Per-process completion (stop) times."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: "deque[Process]") -> "list[int]":
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: "Process") -> int:
        # Credit the time the process spent waiting since it last stopped running.
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time

    def first_come_first_served(self, ready_queue: "deque[Process]") -> "deque[Process]":
        """Run every remaining process to completion in queue order."""
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished

    def round_robin(self, ready_queue: "deque[Process]", time_slice: int):
        """Give each queued process one *time_slice*; requeue the unfinished ones."""
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue

    def multi_level_feedback_queue(self) -> "deque[Process]":
        """Run the full schedule and return the finish queue."""
        # all queues except last one have round_robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    # Bug fix: all four processes were stored under one obfuscated name and
    # then referenced through the undefined `Pa`.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # Fresh processes: the doctest run above may have mutated the first set.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes(P1, P2, P3, P4)
    print(
        f"""waiting time:\
        \t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print completion times of processes(P1, P2, P3, P4)
    print(
        f"""completion time:\
        \t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print total turnaround times of processes(P1, P2, P3, P4)
    print(
        f"""turnaround time:\
        \t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"""
    )
    # print sequence of finished processes
    print(
        f"""sequence of finished processes:\
        {mlfq.calculate_sequence_of_finish_queue()}"""
    )
| 668 | 0 |
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class UpperCamelCase_(ModelMixin, ConfigMixin):
    """Holds mean/std statistics of image embeddings and (un)normalizes them.

    Bug fixes: both base classes were the same undefined obfuscated name
    (the file imports `ModelMixin` and `ConfigMixin` for this purpose) and
    all methods shared one obfuscated name, shadowing each other.
    """

    @register_to_config
    def __init__(self, embedding_dim: int = 7_6_8) -> None:
        super().__init__()
        # Learned-at-load statistics; registered as parameters so `.to()` and
        # state-dict serialization pick them up.
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(self, torch_device=None, torch_dtype=None):
        """Move/cast the statistics; returns self for chaining."""
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        """Normalize *embeds* to zero mean / unit std."""
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        """Invert :meth:`scale`."""
        embeds = (embeds * self.std) + self.mean
        return embeds
| 682 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# Silence the huggingface tokenizers fork warning in multi-process runs;
# the obfuscated version assigned the flag string to an unused name instead
# of the environment variable.
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Return (model, ddp_model, dataloader) for a tiny regression task.

    Renamed to match the in-file call site; the original declared three
    identically named parameters (SyntaxError).
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: "Accelerator", use_longest=False):
    """Build the tokenized GLUE/MRPC validation dataloader.

    When *use_longest* is true, batches are padded to the longest sample;
    otherwise to a fixed max_length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=1_28, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Return ({"ddp": ..., "no": ...} model/dataloader/device triples, accelerator)."""
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run *model* over *dataloader*, gather per-process outputs, and return (logits, targets)."""
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics yields exactly *num_samples* predictions."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Compare single-process vs distributed metric computation on MRPC."""
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)

    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def lowerCAmelCase_ () -> Tuple:
    '''Run the full gather_for_metrics test matrix over every combination of
    `split_batches` x `dispatch_batches`, resetting accelerator state between
    runs.

    NOTE(review): the calls `test_mrpc(...)` / `test_torch_metrics(...)` and
    the `Accelerator(split_batches=lowercase__ , ...)` arguments reference
    names lost to mechanical renaming (the defs above are all named
    ``lowerCAmelCase_`` and the loop variables are not passed through);
    restore from the original accelerate test before running.
    '''
    lowerCAmelCase__ = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
    # Keep log noise down on non-main processes.
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`' )
                test_mrpc(lowercase__ , lowercase__ )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            lowerCAmelCase__ = Accelerator(split_batches=lowercase__ , dispatch_batches=lowercase__ )
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99' )
            test_torch_metrics(lowercase__ , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''' )
    lowerCAmelCase__ = Accelerator()
    test_torch_metrics(lowercase__ , 5_12 )
    accelerator.state._reset_state()
def lowerCAmelCase_ (lowercase__ : Optional[int] ) -> List[str]:
    '''TPU spawn entry point: the launcher passes a process index, which is
    ignored; the full test suite is simply re-run in each process.

    NOTE(review): ensure a callable named ``main`` exists in this module —
    the def above was mechanically renamed, so as written `main` is
    unresolved.
    '''
    main()
if __name__ == "__main__":
    main()
| 668 | 0 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal (fractional) part of *number*.

    The integer part is removed by truncation toward zero (``int``), so the
    sign of the result follows the sign of the input.  When *digit_amount*
    is positive the fraction is rounded to that many decimal places;
    otherwise it is returned unrounded.

    Fixes vs. original: the def had two parameters both named ``_A``
    (a duplicate-argument SyntaxError), the body read undefined names
    (``number``, ``digit_amount``, ``lowercase__`` — with ``lowercase__``
    standing in for *both* the number and the precision), and the name did
    not match the ``decimal_isolate`` call sites below.
    """
    fractional = number - int(number)
    if digit_amount > 0:
        return round(fractional, digit_amount)
    return fractional
if __name__ == "__main__":
print(decimal_isolate(1.5_3, 0))
print(decimal_isolate(3_5.3_4_5, 1))
print(decimal_isolate(3_5.3_4_5, 2))
print(decimal_isolate(3_5.3_4_5, 3))
print(decimal_isolate(-1_4.7_8_9, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-1_4.1_2_3, 1))
print(decimal_isolate(-1_4.1_2_3, 2))
print(decimal_isolate(-1_4.1_2_3, 3))
| 493 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger.
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
# NOTE(review): every constant below is rebound to the same name
# ``_UpperCAmelCase`` (mechanical renaming), so earlier values are lost and
# the tokenizer class's references to VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# CONTROL_CODES are unresolved — restore the original names before use.
# On-disk vocabulary file names expected by the tokenizer.
_UpperCAmelCase : str = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}
# Remote vocab/merges locations for the pretrained "ctrl" checkpoint.
_UpperCAmelCase : str = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
# Maximum input length (positional-embedding size) per checkpoint.
_UpperCAmelCase : List[str] = {
    "ctrl": 256,
}
# CTRL "control code" prompt tokens mapped to their vocabulary ids.
_UpperCAmelCase : int = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word*.

    *word* is any sequence of symbols (in the BPE loop below, a tuple of
    variable-length strings).  A word of fewer than two symbols yields the
    empty set.

    Fixes vs. original: the def was named ``lowerCAmelCase_`` while the
    tokenizer's ``bpe`` method calls ``get_pairs`` (unresolved), and the
    final ``pairs = set(pairs)`` was a redundant re-wrap of an existing set.
    """
    # zip(word, word[1:]) yields exactly the (prev_char, char) pairs the
    # original explicit loop produced.
    return set(zip(word, word[1:]))
class lowerCAmelCase_ ( snake_case__ ):
    """Byte-pair-encoding (BPE) tokenizer for the Salesforce CTRL model:
    loads a JSON vocab and a merges file, applies greedy BPE with a per-token
    cache, and marks intra-word pieces with an ``@@`` suffix.

    NOTE(review): this block is a mechanically renamed copy — the base class
    ``snake_case__`` (presumably PreTrainedTokenizer), the class attributes
    (all rebound to the single name ``UpperCamelCase_``), the duplicated
    ``SCREAMING_SNAKE_CASE_`` parameter names (a SyntaxError in __init__),
    and locals collapsed into ``lowerCAmelCase__`` all need restoring before
    the class can run.  The control flow itself is the standard CTRL BPE
    implementation.
    """
    # All four assignments bind the same name, so only the last survives.
    UpperCamelCase_ :int = VOCAB_FILES_NAMES
    UpperCamelCase_ :str = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ :Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ :Optional[int] = CONTROL_CODES
    def __init__( self : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any]="<unk>" , **SCREAMING_SNAKE_CASE_ : Tuple ):
        # Load the JSON vocabulary (token -> id), build the reverse map,
        # and parse the merges file (skipping its header line) into BPE ranks.
        super().__init__(unk_token=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
            lowerCAmelCase__ = json.load(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {v: k for k, v in self.encoder.items()}
        with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
            lowerCAmelCase__ = merges_handle.read().split('''\n''' )[1:-1]
        lowerCAmelCase__ = [tuple(merge.split() ) for merge in merges]
        lowerCAmelCase__ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
        # per-token BPE result cache
        lowerCAmelCase__ = {}
    @property
    def __snake_case ( self : List[str] ):
        # Originally `vocab_size`.
        return len(self.encoder )
    def __snake_case ( self : Union[str, Any] ):
        # Originally `get_vocab`: base vocab plus any added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )
    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : Any ):
        # Originally `bpe(token)`: greedy lowest-rank merge loop, with the
        # word terminated by a `</w>` marker and pieces joined by "@@ ".
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
        lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
        if not pairs:
            return token
        while True:
            # merge the pair with the lowest (best) rank first
            lowerCAmelCase__ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase__ , lowerCAmelCase__ = bigram
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
            while i < len(SCREAMING_SNAKE_CASE_ ):
                try:
                    lowerCAmelCase__ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase__ = j
                if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase__ = tuple(SCREAMING_SNAKE_CASE_ )
            lowerCAmelCase__ = new_word
            if len(SCREAMING_SNAKE_CASE_ ) == 1:
                break
            else:
                lowerCAmelCase__ = get_pairs(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = '''@@ '''.join(SCREAMING_SNAKE_CASE_ )
        # strip the trailing "</w>" marker
        lowerCAmelCase__ = word[:-4]
        lowerCAmelCase__ = word
        return word
    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ):
        # Originally `_tokenize`: whitespace-split, then BPE each word.
        lowerCAmelCase__ = []
        lowerCAmelCase__ = re.findall(R'''\S+\n?''' , SCREAMING_SNAKE_CASE_ )
        for token in words:
            split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
        return split_tokens
    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any ):
        # Originally `_convert_token_to_id` (unknown tokens -> unk id).
        return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
        # Originally `_convert_id_to_token`.
        return self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str ):
        # Originally `convert_tokens_to_string`: undo the "@@ " BPE joints.
        lowerCAmelCase__ = ''' '''.join(SCREAMING_SNAKE_CASE_ ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ):
        # Originally `save_vocabulary`: write vocab.json and a rank-ordered
        # merges.txt into `save_directory`.
        if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowerCAmelCase__ = os.path.join(
            SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
        lowerCAmelCase__ = 0
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # NOTE(review): the sort key reads `kv[1]` but the lambda's
            # parameter was renamed — originally `lambda kv: kv[1]`.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowerCAmelCase__ = token_index
                writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
| 668 | 0 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return ``u * (u-1) * ... * (u-p+1)``, the coefficient product used by
    Newton's forward-difference interpolation formula.

    For ``p <= 1`` the loop body never runs and the result is just *u*.

    Fixes vs. original: the def had two parameters both named ``a__`` (a
    duplicate-argument SyntaxError) while the body read the undefined names
    ``u``, ``temp`` and ``lowercase__``; the interpolation `main` below
    calls this helper as ``ucal(u, i)``, so that name is restored here.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def __SCREAMING_SNAKE_CASE ( ) -> None:
    """Interactive Newton forward-difference interpolation: read n sample
    points from stdin, build the forward-difference table in-place in `y`,
    and print the interpolated value at a user-supplied point.

    NOTE(review): mechanical renaming broke several statements — the
    `range(lowercase__)` bounds presumably meant `range(n)`,
    ``y[i].append(lowercase__)`` presumably appended 0, the reads into `x`
    and `y[i][0]` lost their targets, and every result is rebound to
    ``__A`` — restore from the original before running.
    """
    __A : Union[str, Any] = int(input("""enter the numbers of values: """ ) )
    __A : List[str] = []
    for _ in range(lowercase__ ):
        y.append([] )
    for i in range(lowercase__ ):
        for j in range(lowercase__ ):
            y[i].append(lowercase__ )
            __A : str = 0
    print("""enter the values of parameters in a list: """ )
    __A : Dict = list(map(lowercase__ ,input().split() ) )
    print("""enter the values of corresponding parameters: """ )
    for i in range(lowercase__ ):
        __A : Optional[int] = float(input() )
    __A : Tuple = int(input("""enter the value to interpolate: """ ) )
    # normalized offset u = (value - x0) / h for equally spaced samples
    __A : int = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 ,lowercase__ ):
        for j in range(n - i ):
            __A : Union[str, Any] = y[j + 1][i - 1] - y[j][i - 1]
    __A : List[str] = y[0][0]
    for i in range(1 ,lowercase__ ):
        summ += (ucal(lowercase__ ,lowercase__ ) * y[0][i]) / math.factorial(lowercase__ )
    print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
    # NOTE(review): `main` is the original entry-point name; ensure such an
    # alias exists, since the def above was renamed.
    main()
| 17 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_ :
    """Abstract base for generation streamers: `.generate()` pushes new token
    ids into the streamer and then signals the end of generation.

    NOTE(review): both methods were renamed to ``__snake_case`` (after name
    mangling the second def overwrites the first); originally these were
    ``put(value)`` and ``end()``.
    """
    def __snake_case ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
        # Receive a new batch of token ids (originally `put`).
        raise NotImplementedError()
    def __snake_case ( self : Union[str, Any] ):
        # Signal that generation has finished (originally `end`).
        raise NotImplementedError()
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer that decodes tokens as they arrive and prints them to stdout,
    flushing on newlines, on CJK characters, and otherwise only up to the
    last complete word (so partial words are never printed).

    NOTE(review): mechanically renamed copy — ``snake_case__`` is presumably
    the BaseStreamer above, every method is named ``__snake_case`` (only the
    last survives after mangling) and locals collapsed to
    ``lowerCAmelCase__``; the structure matches transformers' TextStreamer.
    """
    def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , **SCREAMING_SNAKE_CASE_ : List[Any] ):
        # Store the tokenizer, the skip-prompt flag, and decode kwargs.
        lowerCAmelCase__ = tokenizer
        lowerCAmelCase__ = skip_prompt
        lowerCAmelCase__ = decode_kwargs
        # variables used in the streaming process
        lowerCAmelCase__ = []
        lowerCAmelCase__ = 0
        lowerCAmelCase__ = True
    def __snake_case ( self : Dict , SCREAMING_SNAKE_CASE_ : List[str] ):
        # Originally `put(value)`: accumulate new ids, re-decode, and emit
        # whatever prefix of the decoded text is safely final.
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''' )
        elif len(value.shape ) > 1:
            lowerCAmelCase__ = value[0]
        # The first call carries the prompt; optionally swallow it.
        if self.skip_prompt and self.next_tokens_are_prompt:
            lowerCAmelCase__ = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n''' ):
            lowerCAmelCase__ = text[self.print_len :]
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
        # If the last token is a CJK character, we print the characters.
        elif len(SCREAMING_SNAKE_CASE_ ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            lowerCAmelCase__ = text[self.print_len :]
            self.print_len += len(SCREAMING_SNAKE_CASE_ )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            lowerCAmelCase__ = text[self.print_len : text.rfind(''' ''' ) + 1]
            self.print_len += len(SCREAMING_SNAKE_CASE_ )
        self.on_finalized_text(SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : List[Any] ):
        # Originally `end()`: emit whatever remains and mark stream end.
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            lowerCAmelCase__ = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            lowerCAmelCase__ = text[self.print_len :]
            lowerCAmelCase__ = []
            lowerCAmelCase__ = 0
        else:
            lowerCAmelCase__ = ''''''
        lowerCAmelCase__ = True
        self.on_finalized_text(SCREAMING_SNAKE_CASE_ , stream_end=SCREAMING_SNAKE_CASE_ )
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
        # Originally `on_finalized_text`: print with no newline until the
        # stream ends (then `end=None` restores the default newline).
        print(SCREAMING_SNAKE_CASE_ , flush=SCREAMING_SNAKE_CASE_ , end='''''' if not stream_end else None )
    def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        #   https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf) #
            or (cp >= 0x2_0000 and cp <= 0x2_a6df) #
            or (cp >= 0x2_a700 and cp <= 0x2_b73f) #
            or (cp >= 0x2_b740 and cp <= 0x2_b81f) #
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf) #
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f) #
        ): #
            return True
        return False
class lowerCAmelCase_ ( snake_case__ ):
    """Streamer variant that pushes finalized text chunks into a queue so a
    consumer thread can iterate over the generated text (the transformers
    TextIteratorStreamer pattern); a ``None`` sentinel marks end-of-stream.

    NOTE(review): mechanically renamed copy — ``snake_case__`` is presumably
    the TextStreamer above, and the two ``__snake_case`` methods were
    originally ``on_finalized_text`` and ``__next__``.
    """
    def __init__( self : Tuple , SCREAMING_SNAKE_CASE_ : "AutoTokenizer" , SCREAMING_SNAKE_CASE_ : bool = False , SCREAMING_SNAKE_CASE_ : Optional[float] = None , **SCREAMING_SNAKE_CASE_ : List[str] ):
        super().__init__(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = Queue()
        # stop-signal sentinel (None) and optional blocking timeout
        lowerCAmelCase__ = None
        lowerCAmelCase__ = timeout
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : bool = False ):
        # Originally `on_finalized_text`: enqueue the chunk; on stream end,
        # also enqueue the sentinel so iteration terminates.
        self.text_queue.put(SCREAMING_SNAKE_CASE_ , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )
    def __iter__( self : Optional[int] ):
        return self
    def __snake_case ( self : int ):
        # Originally `__next__`: block until the next chunk or the sentinel.
        lowerCAmelCase__ = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 | 0 |
'''simple docstring'''
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger.
_UpperCAmelCase : Any = logging.get_logger(__name__)
# NOTE(review): every constant below is rebound to the same name
# ``_UpperCAmelCase`` (mechanical renaming); the tokenizer class references
# SPIECE_UNDERLINE, VOCAB_FILES_NAMES, RESOURCE_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and
# PRETRAINED_INIT_CONFIGURATION, none of which survive — restore the names.
# SentencePiece word-boundary marker.
_UpperCAmelCase : Union[str, Any] = "▁"
_UpperCAmelCase : Union[str, Any] = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}
_UpperCAmelCase : Dict = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}
# Remote file locations per pretrained checkpoint.
_UpperCAmelCase : Dict = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}
# Maximum input lengths per checkpoint.
_UpperCAmelCase : int = {
    "ernie-m-base": 5_14,
    "ernie-m-large": 5_14,
}
# Default init kwargs per checkpoint.
_UpperCAmelCase : str = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class __magic_name__ ( snake_case__ ):
    """SentencePiece-based tokenizer for ERNIE-M: loads an spm model plus an
    optional plain-text vocab, splits pieces further at CJK/punctuation and
    digit boundaries, and builds the [CLS] A [SEP] [SEP] B [SEP] pair layout.

    NOTE(review): mechanically renamed copy — ``snake_case__`` is presumably
    PreTrainedTokenizer, the class attributes are all rebound to
    ``UpperCamelCase__`` (only the last survives), ``__init__`` repeats the
    parameter name ``snake_case_`` (a SyntaxError), most methods share the
    name ``_A`` (only the last binding survives), and locals collapsed into
    ``lowercase`` leave many reads (``split_tokens``, ``pieces``, ``vocab_file``,
    …) unresolved.  The control flow matches transformers' ErnieMTokenizer.
    """
    UpperCamelCase__ = ["input_ids"]
    UpperCamelCase__ = VOCAB_FILES_NAMES
    UpperCamelCase__ = PRETRAINED_INIT_CONFIGURATION
    UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase__ = RESOURCE_FILES_NAMES
    def __init__( self , snake_case_ , snake_case_=None , snake_case_=False , snake_case_="utf8" , snake_case_="[UNK]" , snake_case_="[SEP]" , snake_case_="[PAD]" , snake_case_="[CLS]" , snake_case_="[MASK]" , snake_case_ = None , **snake_case_ , ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        lowercase ={} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=SCREAMING_SNAKE_CASE_ , unk_token=SCREAMING_SNAKE_CASE_ , sep_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , cls_token=SCREAMING_SNAKE_CASE_ , mask_token=SCREAMING_SNAKE_CASE_ , vocab_file=SCREAMING_SNAKE_CASE_ , encoding=SCREAMING_SNAKE_CASE_ , sp_model_kwargs=self.sp_model_kwargs , **SCREAMING_SNAKE_CASE_ , )
        lowercase =do_lower_case
        lowercase =sentencepiece_model_ckpt
        lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(SCREAMING_SNAKE_CASE_ )
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            lowercase =self.load_vocab(filepath=SCREAMING_SNAKE_CASE_ )
        else:
            lowercase ={self.sp_model.id_to_piece(SCREAMING_SNAKE_CASE_ ): id for id in range(self.sp_model.get_piece_size() )}
        lowercase ={v: k for k, v in self.vocab.items()}
    def _A( self , snake_case_ ):
        # Originally `get_offset_mapping`: map each produced token back to a
        # (start, end) character span in the original (NFKC-normalized) text.
        if text is None:
            return None
        lowercase =self.tokenize(SCREAMING_SNAKE_CASE_ )
        lowercase , lowercase ='''''', []
        for i, ch in enumerate(SCREAMING_SNAKE_CASE_ ):
            if ch in self.SP_CHAR_MAPPING:
                lowercase =self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE_ )
            else:
                lowercase =unicodedata.normalize('''NFKC''' , SCREAMING_SNAKE_CASE_ )
            if self.is_whitespace(SCREAMING_SNAKE_CASE_ ):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(SCREAMING_SNAKE_CASE_ ) )
        lowercase , lowercase , lowercase =normalized_text, [], 0
        if self.do_lower_case:
            lowercase =text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                # drop the SentencePiece word-boundary marker before matching
                lowercase =token[1:]
            lowercase =text[offset:].index(SCREAMING_SNAKE_CASE_ ) + offset
            lowercase =start + len(SCREAMING_SNAKE_CASE_ )
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
            lowercase =end
        return token_mapping
    @property
    def _A( self ):
        # Originally `vocab_size`.
        return len(self.vocab )
    def _A( self ):
        # Originally `get_vocab`: base vocab plus added tokens.
        return dict(self.vocab , **self.added_tokens_encoder )
    def __getstate__( self ):
        # The SentencePieceProcessor is not picklable; drop it from state.
        lowercase =self.__dict__.copy()
        lowercase =None
        return state
    def __setstate__( self , snake_case_ ):
        # Restore state and reload the SentencePiece model from its checkpoint.
        lowercase =d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            lowercase ={}
        lowercase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.sentencepiece_model_ckpt )
    def _A( self , snake_case_ ):
        # Originally `clean_text`: apply the SP_CHAR_MAPPING substitutions.
        return "".join((self.SP_CHAR_MAPPING.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for c in text) )
    def _A( self , snake_case_ , snake_case_=False , snake_case_=64 , snake_case_=0.1 ):
        # Originally `_tokenize`: SentencePiece-encode (optionally with
        # sampling), then split pieces again at CJK/punctuation characters
        # and at digit/non-digit boundaries.
        if self.sp_model_kwargs.get('''enable_sampling''' ) is True:
            lowercase =True
        if self.sp_model_kwargs.get('''alpha''' ) is not None:
            lowercase =self.sp_model_kwargs.get('''alpha''' )
        if self.sp_model_kwargs.get('''nbest_size''' ) is not None:
            lowercase =self.sp_model_kwargs.get('''nbest_size''' )
        if not enable_sampling:
            lowercase =self.sp_model.EncodeAsPieces(SCREAMING_SNAKE_CASE_ )
        else:
            lowercase =self.sp_model.SampleEncodeAsPieces(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
        lowercase =[]
        for pi, piece in enumerate(SCREAMING_SNAKE_CASE_ ):
            if piece == SPIECE_UNDERLINE:
                if not pieces[pi + 1].startswith(SCREAMING_SNAKE_CASE_ ) and pi != 0:
                    new_pieces.append(SCREAMING_SNAKE_CASE_ )
                    continue
                else:
                    continue
            lowercase =0
            for i, chunk in enumerate(SCREAMING_SNAKE_CASE_ ):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(SCREAMING_SNAKE_CASE_ ) or self.is_punct(SCREAMING_SNAKE_CASE_ ):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    new_pieces.append(SCREAMING_SNAKE_CASE_ )
                    lowercase =i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lowercase =i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i] )
                    lowercase =i
            if len(SCREAMING_SNAKE_CASE_ ) > lst_i:
                new_pieces.append(piece[lst_i:] )
        return new_pieces
    def _A( self , snake_case_ ):
        # Originally `convert_tokens_to_string`: join and strip the "▁" marker.
        lowercase =''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip()
        return out_string
    def _A( self , snake_case_ ):
        # Originally `convert_ids_to_string`.
        lowercase =self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
        lowercase =''''''.join(SCREAMING_SNAKE_CASE_ ).replace(SCREAMING_SNAKE_CASE_ , ''' ''' ).strip()
        return out_string
    def _A( self , snake_case_ ):
        # Originally `_convert_token_to_id` (unknown tokens -> unk id).
        return self.vocab.get(SCREAMING_SNAKE_CASE_ , self.vocab.get(self.unk_token ) )
    def _A( self , snake_case_ ):
        # Originally `_convert_id_to_token`.
        return self.reverse_vocab.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
    def _A( self , snake_case_ , snake_case_=None ):
        # Originally `build_inputs_with_special_tokens`:
        # single: [CLS] A [SEP]; pair: [CLS] A [SEP] [SEP] B [SEP].
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase =[self.cls_token_id]
        lowercase =[self.sep_token_id]
        return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
    def _A( self , snake_case_ , snake_case_=None ):
        # Originally `build_offset_mapping_with_special_tokens`: pad offsets
        # with (0, 0) entries for the special tokens.
        if offset_mapping_a is None:
            return [(0, 0)] + offset_mapping_a + [(0, 0)]
        return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
    def _A( self , snake_case_ , snake_case_=None , snake_case_=False ):
        # Originally `get_special_tokens_mask`: 1 for special tokens, 0 for
        # sequence tokens, following the pair layout above.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    '''You should not supply a second sequence if the provided sequence of '''
                    '''ids is already formatted with special tokens for the model.''' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is not None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE_ )) + [1]
    def _A( self , snake_case_ , snake_case_ = None ):
        # called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
        if token_ids_a is None:
            # [CLS] X [SEP]
            return (len(SCREAMING_SNAKE_CASE_ ) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(SCREAMING_SNAKE_CASE_ ) + 1) + [1] * (len(SCREAMING_SNAKE_CASE_ ) + 3)
    def _A( self , snake_case_ ):
        # Originally `is_ch_char`: CJK Unified Ideographs block.
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False
    def _A( self , snake_case_ ):
        # Originally `is_alpha`: ASCII letter test.
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False
    def _A( self , snake_case_ ):
        # Originally `is_punct`: ASCII + fullwidth punctuation set.
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False
    def _A( self , snake_case_ ):
        # Originally `is_whitespace`: ASCII whitespace plus Unicode "Zs".
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(SCREAMING_SNAKE_CASE_ ) == 1:
            lowercase =unicodedata.category(SCREAMING_SNAKE_CASE_ )
            if cat == "Zs":
                return True
        return False
    def _A( self , snake_case_ ):
        # Originally `load_vocab`: one token per line -> line-number id.
        lowercase ={}
        with io.open(SCREAMING_SNAKE_CASE_ , '''r''' , encoding='''utf-8''' ) as f:
            for index, line in enumerate(SCREAMING_SNAKE_CASE_ ):
                lowercase =line.rstrip('''\n''' )
                lowercase =int(SCREAMING_SNAKE_CASE_ )
        return token_to_idx
    def _A( self , snake_case_ , snake_case_ = None ):
        # Originally `save_vocabulary`: write the plain-text vocab (warning on
        # non-consecutive indices) and the serialized SentencePiece model.
        lowercase =0
        if os.path.isdir(SCREAMING_SNAKE_CASE_ ):
            lowercase =os.path.join(
                SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        else:
            lowercase =(filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
        with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
            # NOTE(review): the sort key reads `kv[1]` but the lambda's
            # parameter was renamed — originally `lambda kv: kv[1]`.
            for token, token_index in sorted(self.vocab.items() , key=lambda snake_case_ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ''' Please check that the vocabulary is not corrupted!''' )
                    lowercase =token_index
                writer.write(token + '''\n''' )
                index += 1
        lowercase =os.path.join(SCREAMING_SNAKE_CASE_ , '''sentencepiece.bpe.model''' )
        with open(SCREAMING_SNAKE_CASE_ , '''wb''' ) as fi:
            lowercase =self.sp_model.serialized_model_proto()
            fi.write(SCREAMING_SNAKE_CASE_ )
        return (vocab_file,)
| 72 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# NOTE(review): mechanical renaming rebinds every value below to
# ``_UpperCAmelCase`` while ``_LazyModule`` at the bottom expects a name
# ``_import_structure`` that is never defined; the intended shape is the
# standard transformers lazy-import pattern — a dict with a
# "configuration_mra" entry plus, when torch is available, "modeling_mra".
_UpperCAmelCase : Union[str, Any] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch not installed: the modeling symbols are simply not exported
    pass
else:
    _UpperCAmelCase : List[Any] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    # ...while at runtime the module is replaced by a lazy loader.
    import sys

    _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 668 | 0 |
import sys
def matrix_chain_order(array):
    """Solve the matrix-chain multiplication problem by dynamic programming.

    *array* holds the chain dimensions: matrix A_i is array[i-1] x array[i],
    so a chain of k matrices is described by len(array) == k + 1 entries.

    Returns ``(matrix, sol)`` where ``matrix[a][b]`` is the minimum number of
    scalar multiplications needed to compute A_a..A_b and ``sol[a][b]`` is
    the split point that achieves it (for use by print_optiomal_solution).

    Fixes vs. original: the body read the undefined name ``lowercase__``
    instead of the input array, every intermediate result was collapsed into
    the single name ``A__`` (so the DP tables were never actually filled),
    and the def name did not match the ``matrix_chain_order`` call site in
    `main` below.
    """
    n = len(array)
    matrix = [[0 for _ in range(n)] for _ in range(n)]
    sol = [[0 for _ in range(n)] for _ in range(n)]
    # Bottom-up over chain lengths; index 0 of the tables is unused so that
    # matrices are 1-based, matching the classic CLRS formulation.
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optiomal_solution(optimal_solution, i, j):
    """Print the optimal parenthesization of matrices A_i..A_j.

    *optimal_solution* is the split-point table returned by
    matrix_chain_order.  Output is space-separated with no trailing newline,
    e.g. ``( A1 A2 ) `` for a two-matrix chain.

    Fixes vs. original: the def repeated the parameter name
    ``UpperCamelCase__`` three times (a duplicate-argument SyntaxError), the
    body mixed the renamed ``lowercase__`` placeholders with the intended
    names, and the recursive calls (and the call in `main`) reference
    ``print_optiomal_solution``, which is restored here.
    """
    if i == j:
        # Base case: a single matrix.
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        # Recurse on the two halves around the recorded split point.
        print_optiomal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optiomal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")
def main():
    """Demo entry point: solve a fixed matrix-chain instance and print the
    minimum operation count and the optimal parenthesization.

    Fixes vs. original: every local was collapsed into the single name
    ``A__`` (clobbering the array with its own length and discarding the DP
    tables) and the calls read undefined names; the def is renamed ``main``
    to match the ``__main__`` guard below.
    """
    # Chain dimensions: A1 is 30x35, A2 is 35x15, ..., A6 is 20x25.
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print("No. of Operation required: " + str(matrix[1][n - 1]))
    print_optiomal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
    main()
| 190 |
from __future__ import annotations
def fractional_knapsack(
    value: list[int], weight: list[int], capacity: int
) -> tuple[float, list[float]]:
    """Greedy solver for the fractional knapsack problem.

    Items are taken in decreasing value/weight ratio; the first item that no
    longer fits is taken fractionally and the scan stops (the greedy choice
    is optimal for the fractional variant).

    Returns ``(max_value, fractions)`` where ``fractions[i]`` is the fraction
    of item *i* placed in the knapsack (0, 1, or a partial amount for the
    last item taken).  Empty inputs yield ``(0, [])``.

    Fixes vs. original: the def repeated the parameter name ``lowercase__``
    three times (a duplicate-argument SyntaxError), the sort key
    ``lambda lowercase__: ratio[i]`` read the undefined name ``i`` instead of
    its own parameter, and ``reverse=lowercase__`` was undefined where
    ``reverse=True`` was intended.
    """
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    # Sort item indices by value density, best first.
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Take only the fraction that still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 668 | 0 |
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Remote config locations per pretrained XLNet checkpoint.
# NOTE(review): both assignments rebind the same name (mechanical renaming),
# so the logger is lost and the archive map's original name
# (XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP) is gone — restore before use; the
# `logger.info(...)` call inside the class below is otherwise unresolved.
_SCREAMING_SNAKE_CASE = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class __UpperCAmelCase ( snake_case__ ):
    '''Configuration for XLNet models: vocabulary/layout/attention sizes,
    memory and relative-attention settings, and sequence-summary options,
    mirroring transformers' XLNetConfig.

    NOTE(review): mechanically renamed copy — ``snake_case__`` is presumably
    PretrainedConfig, ``__init__`` repeats the parameter name ``_lowercase``
    (a duplicate-argument SyntaxError) while its body reads the original
    names, and the property/setter pair below was renamed to ``__snake_case``
    so the ``@max_position_embeddings.setter`` decorator is unresolved.
    '''
    _UpperCamelCase = 'xlnet'
    _UpperCamelCase = ['mems']
    _UpperCamelCase = {
        'n_token': 'vocab_size', # Backward compatibility
        'hidden_size': 'd_model',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self : Any , _lowercase : Union[str, Any]=32_000 , _lowercase : List[str]=1_024 , _lowercase : Union[str, Any]=24 , _lowercase : str=16 , _lowercase : str=4_096 , _lowercase : Optional[Any]="gelu" , _lowercase : Optional[int]=True , _lowercase : Optional[Any]="bi" , _lowercase : List[str]=0.02 , _lowercase : Any=1E-12 , _lowercase : Optional[Any]=0.1 , _lowercase : Union[str, Any]=512 , _lowercase : int=None , _lowercase : Dict=True , _lowercase : Dict=False , _lowercase : List[str]=False , _lowercase : Any=-1 , _lowercase : Optional[int]=False , _lowercase : List[Any]="last" , _lowercase : Optional[int]=True , _lowercase : Union[str, Any]="tanh" , _lowercase : int=0.1 , _lowercase : Tuple=5 , _lowercase : Optional[int]=5 , _lowercase : Optional[Any]=5 , _lowercase : Union[str, Any]=1 , _lowercase : Optional[Any]=2 , **_lowercase : Any , ) -> Optional[Any]:
        A_ = vocab_size
        A_ = d_model
        A_ = n_layer
        A_ = n_head
        # hidden size must split evenly across attention heads
        if d_model % n_head != 0:
            raise ValueError(F'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0')
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    F'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})')
        A_ = d_model // n_head
        A_ = ff_activation
        A_ = d_inner
        A_ = untie_r
        A_ = attn_type
        A_ = initializer_range
        A_ = layer_norm_eps
        A_ = dropout
        A_ = mem_len
        A_ = reuse_len
        A_ = bi_data
        A_ = clamp_len
        A_ = same_length
        A_ = summary_type
        A_ = summary_use_proj
        A_ = summary_activation
        A_ = summary_last_dropout
        A_ = start_n_top
        A_ = end_n_top
        A_ = bos_token_id
        A_ = pad_token_id
        A_ = eos_token_id
        # `use_cache` was renamed to `use_mems_eval`; accept the old kwarg
        # with a deprecation warning.
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , SCREAMING_SNAKE_CASE_ , )
            A_ = kwargs['use_cache']
        A_ = use_mems_eval
        A_ = use_mems_train
        super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_)
    @property
    def __snake_case ( self : Optional[Any]) -> List[str]:
        # Originally the `max_position_embeddings` property: XLNet has no
        # fixed maximum sequence length, signalled by -1.
        logger.info(F'The model {self.model_type} is one of the few models that has no sequence length limit.')
        return -1
    @max_position_embeddings.setter
    def __snake_case ( self : Dict , _lowercase : List[str]) -> Dict:
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F'The model {self.model_type} is one of the few models that has no sequence length limit.')
| 366 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_parquet_dataset(dataset, expected_features):
    """Shared assertion helper for the parquet reader tests: verify that a
    Dataset loaded from the fixture parquet file has the expected shape
    (4 rows, 3 columns named col_1..col_3) and per-column feature dtypes.

    *expected_features* maps column name -> expected dtype string.

    Fixes vs. original: the def repeated the parameter name ``lowercase__``
    twice (a duplicate-argument SyntaxError), the first assertion was the
    meaningless ``isinstance(lowercase__, lowercase__)`` where
    ``isinstance(dataset, Dataset)`` was intended, and the name is restored
    to ``_check_parquet_dataset`` to match the call sites in the tests below.
    """
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a parquet file must grow Arrow memory only when keep_in_memory=True.

    BUGFIX: restored distinct parameter names (the obfuscated signature was a
    duplicate-argument SyntaxError) and a unique `test_`-prefixed function name
    so pytest can collect it; fixture injection matches on parameter names.
    """
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """Casting to explicit `features` at read time must be honoured (None keeps defaults).

    BUGFIX: restored distinct parameter/local names; the obfuscated version
    reused one identifier for every local (each assignment clobbered the last)
    and passed the dict itself to `Value` instead of each dtype.
    """
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """The `split` passed to the reader must be reflected on the dataset (default: train)."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    # BUGFIX: without the parentheses this assert was vacuous for split=None
    # (it only checked the truthiness of the string "train").
    assert dataset.split == (split if split else "train")
@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader must accept both a single path and a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)
def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for the DatasetDict reader tests.

    Each split listed in `splits` must be a 4-row / 3-column dataset whose
    column dtypes match `expected_features`. The default `splits` is an
    immutable tuple on purpose.

    BUGFIX: restored distinct parameter names (duplicate-argument SyntaxError
    in the obfuscated version) and the canonical function name used by the
    call sites below, plus `isinstance(dataset_dict, DatasetDict)`.
    """
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading a {split: path} mapping must respect keep_in_memory for Arrow memory."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    """Explicit `features` must be honoured when reading a DatasetDict (None keeps defaults)."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """Each requested split must come back under its own name (None -> train+test)."""
    if split:
        path = {split: parquet_path}
    else:
        # No explicit split: read both a train and a test split.
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def test_parquet_write(dataset, tmp_path):
    """Writing a Dataset to parquet must round-trip the underlying Arrow table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Complex features (Image) must survive a parquet round trip, eager and streaming.

    BUGFIX: restored the fixture parameter names and `streaming=True` (the
    obfuscated version passed an undefined placeholder).
    """
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Image/audio features must select their dedicated parquet row-group size."""
    assert get_writer_batch_size(feature) == expected
| 668 | 0 |
"""simple docstring"""
import mpmath # for roots of unity
import numpy as np
class __UpperCamelCase:
    """Fast polynomial multiplication via the radix-2 Cooley-Tukey FFT.

    Both coefficient lists are zero-padded to a power of two, transformed with
    an iterative DFT over complex roots of unity (from mpmath), multiplied
    pointwise, and inverse-transformed. `self.product` holds the rounded
    complex coefficients of the product polynomial.

    BUGFIX: the obfuscated version declared `__init__` with two identically
    named parameters (a SyntaxError), renamed both private methods to the same
    identifier while call sites still used `__dft`/`__multiply`, and collapsed
    every local/attribute into one shared name; the canonical structure is
    restored here.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists (low degree first), defensively copied;
        # a missing argument is treated as the zero polynomial [0].
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients.
        # NOTE(review): an all-zero input makes this loop empty the list and
        # raise IndexError, as in the original — behaviour kept unchanged.
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros up to the next power of two large enough to hold the
        # product (degree len_A + len_B - 1).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A primitive c_max_length-th complex root of unity used for the transform.
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Iterative radix-2 DFT of polynomial "A" or "B"; returns a flat list."""
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs and inverse-transform the result."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner case: a single value needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT, doubling the number of columns each pass.
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack: keep the single remaining value of each column, rounded.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients.
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of A, B and their product.

        BUGFIX: `enumerate` yields (index, value); the original unpacked them
        swapped and printed "index*x^coefficient".
        """
        a = "A = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]))
        b = "B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]))
        c = "A*B = " + " + ".join(f"{coef}*x^{i}" for i, coef in enumerate(self.product))
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Run this module's doctests when it is executed directly.
    import doctest
    doctest.testmod()
| 633 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# BUGFIX: all five module-level constants were bound to the same obfuscated
# name (each assignment clobbered the previous one) while the tokenizer class
# below references the canonical identifiers; those names are restored.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

# SentencePiece's meta symbol marking a word boundary.
SPIECE_UNDERLINE = "▁"
class lowerCAmelCase_(PreTrainedTokenizer):
    """CamemBERT tokenizer, backed by a SentencePiece BPE model.

    Offsets SentencePiece ids by the four fairseq special tokens and adds the
    `<mask>` token at the end of the vocabulary.

    BUGFIX: the obfuscated version inherited from an undefined name (the file
    imports `PreTrainedTokenizer`), declared `__init__` with ten identically
    named parameters (a SyntaxError), and gave every method the same name so
    later defs shadowed earlier ones and the base-class dispatch (which calls
    `_tokenize`, `save_vocabulary`, ...) was broken. The canonical signatures
    and method names are restored.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],  # mutable default kept for upstream compatibility
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        # NOTE(review): restored from upstream — the obfuscated line assigned
        # this value to a throwaway local instead of registering <mask>.
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add <s> ... </s> (and </s></s> ... </s> for pairs) around the token ids."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """CamemBERT does not use token type ids: return all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self) -> int:
        # SentencePiece vocabulary plus the fairseq special tokens.
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Join tokens back into text, decoding non-special runs with SentencePiece."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 668 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# BUGFIX: the logger and the archive map were both bound to the same
# obfuscated name, so the logger binding was immediately clobbered by the
# dict; the canonical names are restored.
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for Data2Vec-Vision models (`model_type = "data2vec-vision"`).

    BUGFIX: the base class was an undefined placeholder (the file imports
    `PretrainedConfig`), and `__init__` declared every parameter with the
    same name (a SyntaxError); the canonical parameter names were recovered
    from the attribute assignments in the body.

    NOTE(review): a second class below reuses this obfuscated class name and
    shadows this binding at module level — consider renaming one of them.
    """

    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],  # mutable defaults kept for upstream compatibility
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class __lowerCAmelCase(OnnxConfig):
    """ONNX export configuration for Data2Vec-Vision.

    BUGFIX: the base class was an undefined placeholder (the file imports
    `OnnxConfig`), and the class attribute / properties had lost the names
    the ONNX export machinery looks up (`torch_onnx_minimum_version`,
    `inputs`, `atol_for_validation`); they are restored here.

    NOTE(review): this class reuses the obfuscated name of the config class
    above and shadows it at module level — consider renaming one of them.
    """

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Single 4-D image input with dynamic axes for batch and spatial dims.
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating the exported ONNX model.
        return 1e-4
| 9 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# BUGFIX: this module uses stdlib logging, whose factory is `getLogger` (not
# `get_logger`), and the main() below references the name `logger`.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    `HfArgumentParser` turns the field names into CLI flags, so the canonical
    names were restored (the obfuscated version bound every field to the same
    class attribute, and the class name is referenced by `main()` below).
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset name or local train+validation files must be given.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.

    Field names double as CLI flags via `HfArgumentParser`; the class name is
    referenced by `main()` below, so the canonical names were restored.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    # NOTE(review): the obfuscated default was unrecoverable; True matches the
    # upstream tapex example script — confirm against the original.
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def lowerCAmelCase_ () -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowerCAmelCase__ = training_args.get_process_log_level()
logger.setLevel(lowercase__ )
datasets.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.set_verbosity(lowercase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
lowerCAmelCase__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
lowerCAmelCase__ = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
lowerCAmelCase__ = {'''train''': data_args.train_file, '''validation''': data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
lowerCAmelCase__ = data_args.train_file.split('''.''' )[-1]
lowerCAmelCase__ = data_args.test_file.split('''.''' )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
lowerCAmelCase__ = data_args.test_file
else:
raise ValueError('''Need either a GLUE task or a test file for `do_predict`.''' )
for key in data_files.keys():
logger.info(f'load a local file for {key}: {data_files[key]}' )
if data_args.train_file.endswith('''.csv''' ):
# Loading a dataset from local csv files
lowerCAmelCase__ = load_dataset('''csv''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
lowerCAmelCase__ = load_dataset('''json''' , data_files=lowercase__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
lowerCAmelCase__ = raw_datasets['''train'''].features['''label'''].names
lowerCAmelCase__ = len(lowercase__ )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
lowerCAmelCase__ = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=lowercase__ , )
lowerCAmelCase__ = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowercase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
lowerCAmelCase__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
lowerCAmelCase__ = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
lowerCAmelCase__ = {'''Refused''': 0, '''Entailed''': 1}
lowerCAmelCase__ = {0: '''Refused''', 1: '''Entailed'''}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'
f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' )
lowerCAmelCase__ = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(lowercase__ : Any ):
# Tokenize the texts
def _convert_table_text_to_pandas(lowercase__ : Dict ):
lowerCAmelCase__ = [_table_row.split('''#''' ) for _table_row in _table_text.strip('''\n''' ).split('''\n''' )]
lowerCAmelCase__ = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
lowerCAmelCase__ = examples['''statement''']
lowerCAmelCase__ = list(map(_convert_table_text_to_pandas , examples['''table_text'''] ) )
lowerCAmelCase__ = tokenizer(lowercase__ , lowercase__ , padding=lowercase__ , max_length=lowercase__ , truncation=lowercase__ )
lowerCAmelCase__ = examples['''label''']
return result
with training_args.main_process_first(desc='''dataset map pre-processing''' ):
lowerCAmelCase__ = raw_datasets.map(
lowercase__ , batched=lowercase__ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on dataset''' , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
lowerCAmelCase__ = raw_datasets['''train''']
if data_args.max_train_samples is not None:
lowerCAmelCase__ = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
lowerCAmelCase__ = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
lowerCAmelCase__ = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError('''--do_predict requires a test dataset''' )
lowerCAmelCase__ = raw_datasets['''test''']
if data_args.max_predict_samples is not None:
lowerCAmelCase__ = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(lowercase__ ) ) , 3 ):
logger.info(f'Sample {index} of the training set: {train_dataset[index]}.' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(lowercase__ : EvalPrediction ):
lowerCAmelCase__ = p.predictions[0] if isinstance(p.predictions , lowercase__ ) else p.predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
lowerCAmelCase__ = default_data_collator
elif training_args.fpaa:
lowerCAmelCase__ = DataCollatorWithPadding(lowercase__ , pad_to_multiple_of=8 )
else:
lowerCAmelCase__ = None
# Initialize our Trainer
lowerCAmelCase__ = Trainer(
model=lowercase__ , args=lowercase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=lowercase__ , tokenizer=lowercase__ , data_collator=lowercase__ , )
# Training
if training_args.do_train:
lowerCAmelCase__ = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase__ = last_checkpoint
lowerCAmelCase__ = trainer.train(resume_from_checkpoint=lowercase__ )
lowerCAmelCase__ = train_result.metrics
lowerCAmelCase__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowercase__ )
)
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , lowercase__ )
trainer.save_metrics('''train''' , lowercase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCAmelCase__ = trainer.evaluate(eval_dataset=lowercase__ )
lowerCAmelCase__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowercase__ )
lowerCAmelCase__ = min(lowercase__ , len(lowercase__ ) )
trainer.log_metrics('''eval''' , lowercase__ )
trainer.save_metrics('''eval''' , lowercase__ )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
lowerCAmelCase__ = predict_dataset.remove_columns('''label''' )
lowerCAmelCase__ = trainer.predict(lowercase__ , metric_key_prefix='''predict''' ).predictions
lowerCAmelCase__ = np.argmax(lowercase__ , axis=1 )
lowerCAmelCase__ = os.path.join(training_args.output_dir , '''predict_results_tabfact.txt''' )
if trainer.is_world_process_zero():
with open(lowercase__ , '''w''' ) as writer:
logger.info('''***** Predict Results *****''' )
writer.write('''index\tprediction\n''' )
for index, item in enumerate(lowercase__ ):
lowerCAmelCase__ = label_list[item]
writer.write(f'{index}\t{item}\n' )
lowerCAmelCase__ = {'''finetuned_from''': model_args.model_name_or_path, '''tasks''': '''text-classification'''}
if training_args.push_to_hub:
trainer.push_to_hub(**lowercase__ )
else:
trainer.create_model_card(**lowercase__ )
def lowerCAmelCase_ (lowercase__ : Optional[Any] ) -> Dict:
    """TPU multiprocessing entry point (for ``xla_spawn.py``).

    The single argument is the spawned process index; it is intentionally
    ignored and the module-level ``main()`` is run in each process.
    NOTE(review): ``main`` is defined earlier in this file (outside this view).
    """
    main()


if __name__ == "__main__":
    # Normal command-line entry point.
    main()
| 668 | 0 |
"""simple docstring"""
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def SCREAMING_SNAKE_CASE(t5x_checkpoint_path) -> dict:
    """Load a T5X checkpoint and return its parameters as a flat dict.

    The nested parameter tree is flattened with ``flax.traverse_util.flatten_dict``,
    so the returned keys are tuples of path components, e.g.
    ``("target", "encoder", "layers_0", ...)``.

    Bug fix: the original body referenced an undefined name instead of the
    checkpoint-path parameter.
    """
    checkpoint = checkpoints.load_tax_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(checkpoint)
    return flax_params
def SCREAMING_SNAKE_CASE(flax_dict) -> dict:
    """Rename flattened T5X Pix2Struct parameters to HF names and convert to torch.

    Only entries under the ``target`` subtree of *flax_dict* (a dict with tuple
    keys, as produced by ``flatten_dict``) are kept.  Keys are rewritten with a
    generic substitution table plus decoder-specific renames, layer indices are
    normalised (``layers_3`` -> ``layer.3``), and every array except embedding
    tables is transposed (T5X stores linear kernels transposed relative to
    torch) before being wrapped with ``torch.from_numpy``.

    Bug fix: in the original, every local (both mapping tables, the rewritten
    key and both result dicts) had been collapsed into a single clobbered name.
    """
    converted_dict = {}

    # Generic T5X -> HF parameter-name substitutions.
    conversion_mapping = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    # Extra substitutions that only apply to decoder parameters.
    decoder_conversion_mapping = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # Drop the leading "target" component and join the rest into a dotted path.
            new_key = ".".join(key[1:])
            for old, new in conversion_mapping.items():
                new_key = new_key.replace(old, new)
            if "decoder" in new_key:
                for old, new in decoder_conversion_mapping.items():
                    new_key = new_key.replace(old, new)
            if "layers" in new_key and "decoder" not in new_key:
                # Normalise the encoder layer index and nest under encoder.encoder.
                new_key = re.sub(R"layers_(\d+)", R"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                new_key = re.sub(R"layers_(\d+)", R"layer.\1", new_key)
            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            # Linear kernels must be transposed to torch's (out, in) layout.
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])
    return converted_torch_dict
def SCREAMING_SNAKE_CASE(t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False) -> None:
    """Convert a T5X Pix2Struct checkpoint to HF format and save model + processor.

    :param t5x_checkpoint_path: path to the original T5X checkpoint.
    :param pytorch_dump_folder_path: output directory (created if missing).
    :param use_large: build the "large" config (d=1536, ff=3968, 24 heads, 18 layers).
    :param is_vqa: mark the config as a VQA variant.

    Bug fix: the original signature repeated one parameter name four times
    (a SyntaxError) and collapsed all locals into one clobbered name.
    """
    # NOTE(review): ``get_flax_param`` / ``rename_and_convert_flax_params`` are the
    # helpers defined above in this file; their defs were name-mangled, so confirm
    # these module-level names actually resolve.
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = PixaStructVisionConfig()
        decoder_config = PixaStructTextConfig()
    else:
        encoder_config = PixaStructVisionConfig(
            hidden_size=1_536, d_ff=3_968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = PixaStructTextConfig(hidden_size=1_536, d_ff=3_968, num_heads=24, num_layers=18)
    config = PixaStructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = PixaStructForConditionalGeneration(config)
    state_dict = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(state_dict)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = PixaStructImageProcessor()
    processor = PixaStructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # NOTE(review): the original assignment targets were lost in obfuscation;
        # upstream sets these on the image processor — confirm.
        image_processor.max_patches = 4_096
        image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)
    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
    args = parser.parse_args()
    # Bug fixes: the parser/args objects were previously bound to a throwaway
    # name, and the checkpoint path was read from ``args.tax_checkpoint_path``
    # although argparse stores "--t5x_checkpoint_path" as ``t5x_checkpoint_path``.
    # NOTE(review): the converter def above was name-mangled to
    # ``SCREAMING_SNAKE_CASE`` (the last def with that name is the converter).
    SCREAMING_SNAKE_CASE(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 159 |
def lowerCAmelCase_ (number : float , digit_amount : int ) -> float:
    """Isolate the fractional part of *number*.

    :param number: value whose decimal part is wanted.
    :param digit_amount: if > 0, round the fractional part to that many
        decimal places; otherwise return it unrounded.
    :return: the (possibly rounded) fractional part; negative for negative input.

    Bug fix: the original signature declared the same parameter name twice
    (a SyntaxError) while the body already used ``number``/``digit_amount``.
    """
    fraction = number - int(number)
    if digit_amount > 0:
        return round(fraction, digit_amount)
    return fraction
if __name__ == "__main__":
    # Bug fix: these demo calls referenced ``decimal_isolate``, but the def
    # above was renamed to ``lowerCAmelCase_`` — ``decimal_isolate`` no longer
    # exists in this module.
    print(lowerCAmelCase_(1.53, 0))
    print(lowerCAmelCase_(35.345, 1))
    print(lowerCAmelCase_(35.345, 2))
    print(lowerCAmelCase_(35.345, 3))
    print(lowerCAmelCase_(-14.789, 3))
    print(lowerCAmelCase_(0, 2))
    print(lowerCAmelCase_(-14.123, 1))
    print(lowerCAmelCase_(-14.123, 2))
    print(lowerCAmelCase_(-14.123, 3))
| 668 | 0 |
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# Bug fix: logger, MAPPING and TOP_LEVEL_KEYS were all collapsed into one
# throwaway module name, yet the loader functions below read MAPPING and
# logger by these names.
logger = logging.get_logger(__name__)

# fairseq/unilm WavLM parameter name (key) -> HF WavLMModel parameter name
# (value).  "*" is a placeholder for the encoder layer index, substituted in
# while loading.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn.grep_linear": "encoder.layers.*.attention.gru_rel_pos_linear",
    "self_attn.relative_attention_bias": "encoder.layers.*.attention.rel_attn_embed",
    "self_attn.grep_a": "encoder.layers.*.attention.gru_rel_pos_const",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "ctc_proj",
    "mask_emb": "masked_spec_embed",
}
# Parameters that live on pretraining / task heads rather than the bare model.
TOP_LEVEL_KEYS = [
    "ctc_proj",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]
def UpperCAmelCase_ (hf_pointer, key, value, full_name, weight_type) -> None:
    """Copy one fairseq tensor into the HF module reached by walking *key*.

    :param hf_pointer: root HF module; ``key`` is a dotted attribute path under it.
    :param key: dotted path (e.g. ``"encoder.layers.3.attention.k_proj"``).
    :param value: source tensor from the fairseq state dict.
    :param full_name: original fairseq parameter name, for messages only.
    :param weight_type: one of "weight"/"weight_g"/"weight_v"/"bias"/None —
        selects which leaf attribute receives the data.

    Bug fixes: the original signature repeated one parameter name five times
    (a SyntaxError), and every branch dead-stored ``value`` into a local
    instead of writing the module parameter's ``.data``.
    """
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f' {value.shape} for {full_name}'
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def UpperCAmelCase_ (fairseq_model, hf_model) -> None:
    """Copy all weights from a fairseq WavLM model into the HF model.

    Conv feature-extractor weights are routed through the conv loader; every
    other parameter is matched against ``MAPPING`` and assigned via the
    recursive setter.  Parameters matching nothing are collected and logged.

    Bug fix: in the original, every local (the unused-weights list, the state
    dict, the per-name flags and keys) had been collapsed into one clobbered
    name, making the loop inoperative.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            # NOTE(review): ``load_conv_layer`` / ``set_recursively`` are the
            # helpers defined above; their defs were name-mangled, so confirm
            # these module-level names resolve.
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the encoder layer index from the fairseq name.
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f'Unused weights: {unused_weights}')
def UpperCAmelCase_ (full_name, value, feature_extractor, unused_weights, use_group_norm) -> None:
    """Copy one conv feature-extractor weight from fairseq into the HF extractor.

    The fairseq name encodes ``conv_layers.<layer_id>.<type_id>...``; type 0 is
    the conv itself, type 2 the layer norm (only present on every layer without
    group norm, or on layer 0 with it).  Anything else goes to *unused_weights*.

    Bug fix: the original dead-stored ``value`` into a local in every branch
    instead of writing the extractor parameter's ``.data``.
    """
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'{full_name} has size {value.shape}, but'
                f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def UpperCAmelCase_ (checkpoint_path, pytorch_dump_folder_path, config_path=None) -> None:
    """Convert a fairseq/unilm WavLM checkpoint into a HF ``WavLMModel``.

    :param checkpoint_path: path to the original fairseq checkpoint.
    :param pytorch_dump_folder_path: where to save the converted HF model.
    :param config_path: optional HF ``config.json`` to use instead of defaults.

    Bug fix: the original collapsed the checkpoint, config and both model
    objects into a single clobbered local.
    """
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint['cfg'])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint['model'])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    # NOTE(review): this loader is defined above; its def was name-mangled, so
    # confirm the module-level name resolves.
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    # Bug fix: the parser/args objects were previously bound to throwaway
    # names, so ``parser.add_argument`` / ``args.*`` raised NameError.
    # NOTE(review): the converter def above was name-mangled to
    # ``UpperCAmelCase_`` (the last def with that name is the converter).
    UpperCAmelCase_(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 188 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class lowerCAmelCase_ :
    """Test helper that builds tiny Funnel configs/inputs and checks TF Funnel outputs.

    NOTE(review): this class was machine-mangled and is not runnable as-is:
    every method signature repeats the single parameter name
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError), every assignment targets the
    one local ``lowerCAmelCase__`` (upstream these are ``self.<attr> = ...``
    and distinct locals such as ``config``, ``model`` and ``result``), and all
    methods share the name ``__snake_case`` so later defs shadow earlier ones.
    Restore against the upstream ``TFFunnelModelTester`` before relying on it.
    """

    # Presumably the upstream constructor: records sizing/hyper-parameter knobs
    # on ``self`` for the check methods below — confirm against upstream.
    def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str]=13 , SCREAMING_SNAKE_CASE_ : List[Any]=7 , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Any=True , SCREAMING_SNAKE_CASE_ : int=True , SCREAMING_SNAKE_CASE_ : Any=99 , SCREAMING_SNAKE_CASE_ : int=[1, 1, 2] , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : List[str]=32 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=8 , SCREAMING_SNAKE_CASE_ : int=37 , SCREAMING_SNAKE_CASE_ : str="gelu_new" , SCREAMING_SNAKE_CASE_ : Optional[int]=0.1 , SCREAMING_SNAKE_CASE_ : Dict=0.1 , SCREAMING_SNAKE_CASE_ : List[str]=0.0 , SCREAMING_SNAKE_CASE_ : Dict=512 , SCREAMING_SNAKE_CASE_ : Dict=3 , SCREAMING_SNAKE_CASE_ : str=0.02 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3 , SCREAMING_SNAKE_CASE_ : str=4 , SCREAMING_SNAKE_CASE_ : List[str]=None , SCREAMING_SNAKE_CASE_ : str=False , ):
        # NOTE(review): each line below should bind a distinct ``self.<attr>``;
        # the names on the right are the original attribute names.
        lowerCAmelCase__ = parent
        lowerCAmelCase__ = batch_size
        lowerCAmelCase__ = seq_length
        lowerCAmelCase__ = is_training
        lowerCAmelCase__ = use_input_mask
        lowerCAmelCase__ = use_token_type_ids
        lowerCAmelCase__ = use_labels
        lowerCAmelCase__ = vocab_size
        lowerCAmelCase__ = block_sizes
        lowerCAmelCase__ = num_decoder_layers
        lowerCAmelCase__ = d_model
        lowerCAmelCase__ = n_head
        lowerCAmelCase__ = d_head
        lowerCAmelCase__ = d_inner
        lowerCAmelCase__ = hidden_act
        lowerCAmelCase__ = hidden_dropout
        lowerCAmelCase__ = attention_dropout
        lowerCAmelCase__ = activation_dropout
        lowerCAmelCase__ = max_position_embeddings
        lowerCAmelCase__ = type_vocab_size
        lowerCAmelCase__ = 2
        lowerCAmelCase__ = num_labels
        lowerCAmelCase__ = num_choices
        lowerCAmelCase__ = scope
        lowerCAmelCase__ = initializer_std
        # Used in the tests to check the size of the first attention layer
        lowerCAmelCase__ = n_head
        # Used in the tests to check the size of the first hidden state
        lowerCAmelCase__ = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            lowerCAmelCase__ = self.num_hidden_layers + 2

    # Builds random ids/masks/labels plus a FunnelConfig sized from ``self``.
    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowerCAmelCase__ = None
        if self.use_input_mask:
            lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
        lowerCAmelCase__ = None
        if self.use_token_type_ids:
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        lowerCAmelCase__ = None
        if self.use_labels:
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
        lowerCAmelCase__ = FunnelConfig(
            vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )

    # Checks TFFunnelModel output shapes with dict, list and positional inputs,
    # then again with alternative config flags.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , ):
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )

    # Same checks for the encoder-only TFFunnelBaseModel (note the pooled
    # 2/3-token sequence lengths asserted below).
    def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , ):
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = [input_ids, input_mask]
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
        lowerCAmelCase__ = False
        lowerCAmelCase__ = TFFunnelBaseModel(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )

    # Pretraining head: per-token binary logits.
    def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , ):
        lowerCAmelCase__ = TFFunnelForPreTraining(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )

    # Masked-LM head: per-token vocabulary logits.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase__ = TFFunnelForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # Sequence-classification head: one logit vector per example.
    def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Multiple-choice head: inputs tiled across the choice dimension.
    def __snake_case ( self : str , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , ):
        lowerCAmelCase__ = self.num_choices
        lowerCAmelCase__ = TFFunnelForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , (1, self.num_choices, 1) )
        lowerCAmelCase__ = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    # Token-classification head: per-token label logits.
    def __snake_case ( self : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , ):
        lowerCAmelCase__ = self.num_labels
        lowerCAmelCase__ = TFFunnelForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Question-answering head: start/end span logits.
    def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str , ):
        lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Repackages prepare_config_and_inputs() for the common test mixin.
    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.prepare_config_and_inputs()
        (
            (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) , (
                lowerCAmelCase__
            ) ,
        ) = config_and_inputs
        lowerCAmelCase__ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
    """unittest suite for the full (encoder + decoder) TF Funnel model variants.

    NOTE(review): mangled and not runnable as-is — the two mixin bases were
    replaced by the undefined name ``snake_case__`` (upstream:
    ``TFModelTesterMixin, PipelineTesterMixin``), all class attributes share
    the name ``UpperCamelCase_`` (upstream: ``all_model_classes``,
    ``pipeline_model_mapping``, ``test_head_masking``, ``test_onnx``) so later
    assignments overwrite earlier ones, all methods share ``__snake_case``
    (upstream: ``setUp``, ``test_config``, ``test_model``, ...), and the
    ``TFFunnelModelTester`` helper referenced in the first method was itself
    renamed above.  Restore against the upstream test file.
    """

    UpperCamelCase_ :Tuple = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ :Optional[int] = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ :Dict = False
    UpperCamelCase_ :Tuple = False

    # Presumably setUp: builds the model tester and a ConfigTester.
    def __snake_case ( self : int ):
        lowerCAmelCase__ = TFFunnelModelTester(self )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : str ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Tuple ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
@require_tf
class lowerCAmelCase_ ( snake_case__ , unittest.TestCase ):
    """unittest suite for the encoder-only (base) TF Funnel model variants.

    NOTE(review): mangled like the suite above — undefined base name
    ``snake_case__`` (upstream ``TFModelTesterMixin``), collapsed class
    attributes (upstream ``all_model_classes``, ``test_head_masking``,
    ``test_onnx``) and method names (upstream ``setUp``, ``test_config``,
    ``test_base_model``, ...).  The tester is constructed with ``base=True``
    so the helper checks the pooled-output base model.
    """

    UpperCamelCase_ :str = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    UpperCamelCase_ :Optional[Any] = False
    UpperCamelCase_ :Any = False

    def __snake_case ( self : Union[str, Any] ):
        lowerCAmelCase__ = TFFunnelModelTester(self , base=SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : Any ):
        self.config_tester.run_common_tests()

    def __snake_case ( self : Optional[Any] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : int ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )

    def __snake_case ( self : List[str] ):
        lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
| 668 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Bug fix: every structure below previously overwrote a single throwaway
# name and the module then passed an undefined `_import_structure` to
# _LazyModule.  Build the dict incrementally, per the repo's convention.
_import_structure = {
    "configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
    "tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_transfo_xl"] = [
        "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "AdaptiveEmbedding",
        "TransfoXLForSequenceClassification",
        "TransfoXLLMHeadModel",
        "TransfoXLModel",
        "TransfoXLPreTrainedModel",
        "load_tf_weights_in_transfo_xl",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
        "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFAdaptiveEmbedding",
        "TFTransfoXLForSequenceClassification",
        "TFTransfoXLLMHeadModel",
        "TFTransfoXLMainLayer",
        "TFTransfoXLModel",
        "TFTransfoXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
    from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_transfo_xl import (
            TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            AdaptiveEmbedding,
            TransfoXLForSequenceClassification,
            TransfoXLLMHeadModel,
            TransfoXLModel,
            TransfoXLPreTrainedModel,
            load_tf_weights_in_transfo_xl,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_transfo_xl import (
            TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFAdaptiveEmbedding,
            TFTransfoXLForSequenceClassification,
            TFTransfoXLLMHeadModel,
            TFTransfoXLMainLayer,
            TFTransfoXLModel,
            TFTransfoXLPreTrainedModel,
        )

else:
    import sys

    # Bug fix: the lazy module must replace this module in sys.modules; it
    # was previously assigned to a throwaway name and discarded.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 699 |
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
# Bug fix: later code references these three names (FeatureDict/ModelOutput in
# annotations, PICO_TO_ANGSTROM in the ProteinNet parser), but all three were
# previously bound to the same throwaway identifier.
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
# ProteinNet stores coordinates in picometers; PDB wants angstroms.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation.

    Bug fixes: `frozen` was previously bound to an undefined name, every field
    shared one placeholder name (so only a single field survived), and the
    class itself is referenced as `Protein` elsewhere in this module.
    """

    # Cartesian coordinates of atoms in angstroms; atom types follow
    # residue_constants.atom_types (N, CA, C first).
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> "Protein":
    """Parse a ProteinNet text record into a `Protein`.

    Bug fixes: the body previously read many names that were never bound
    (`tags`, `groups`, `seq`, `tertiary`, `mask`, ...), called `map` with the
    input string as the callable, and used the nonexistent dtype
    ``np.floataa``.  Renamed from a duplicate placeholder.
    """
    tag_re = r'(\[[A-Z]+\]\n)'
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    # Pair each [TAG] with the lines of its section.
    groups = zip(tags[0::2], [l.split('\n') for l in tags[1::2]])
    atoms = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Work on a list so unknown residues can be replaced with 'X'.
            seq = list(g[1][0].strip())
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    seq[i] = 'X'
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            tertiary = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            # NOTE(review): assumes residue_constants exposes atom_order
            # (atom name -> column index) -- confirm against the module.
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({'-': 0, '+': 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                ) ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
def lowerCAmelCase_ (lowercase__ : Protein , lowercase__ : int = 0 ) -> List[str]:
'''simple docstring'''
lowerCAmelCase__ = []
lowerCAmelCase__ = prot.remark
if remark is not None:
pdb_headers.append(f'REMARK {remark}' )
lowerCAmelCase__ = prot.parents
lowerCAmelCase__ = prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowerCAmelCase__ = [p for i, p in zip(lowercase__ , lowercase__ ) if i == chain_id]
if parents is None or len(lowercase__ ) == 0:
lowerCAmelCase__ = ['''N/A''']
pdb_headers.append(f'PARENT {" ".join(lowercase__ )}' )
return pdb_headers
def add_pdb_headers(prot: "Protein", pdb_str: str) -> str:
    """Insert REMARK/PARENT header lines of *prot* into an existing PDB string.

    Bug fixes: the two parameters shared one name (a SyntaxError) and the body
    read `parents_per_chain`, `parent_dict`, `max_idx` and `chain_counter`
    that were never bound.  Renamed from a duplicate placeholder.
    """
    out_pdb_lines: List[str] = []
    lines = pdb_str.split('\n')
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f'REMARK {remark}')
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group the parents by their chain index.
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ['N/A'])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [['N/A']]

    def make_parent_line(p: Sequence[str]) -> str:
        return f'PARENT {" ".join(p)}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        # A TER that is not the final record starts the next chain: emit its
        # PARENT line right after it.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ['N/A']
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def to_pdb(prot: "Protein") -> str:
    """Serialize *prot* to a PDB-format string.

    Bug fixes: the body read names that were never bound (`prot`, `aatype`,
    `atom_positions`, `restypes`, ...), zipped over the Protein instead of the
    atom-type list, and used the nonexistent dtype ``np.intaa``.  Renamed
    from a duplicate placeholder.
    """
    restypes = residue_constants.restypes + ['X']

    def res_atoa(r: int) -> str:
        # NOTE(review): assumes residue_constants.restype_atoa maps 1-letter
        # codes to 3-letter PDB residue names -- confirm the attribute name.
        return residue_constants.restype_atoa.get(restypes[r], 'UNK')

    atom_types = residue_constants.atom_types
    pdb_lines: List[str] = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError('Invalid aatypes.')
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_a = res_atoa(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = 'ATOM'
            name = atom_name if len(atom_name) == 4 else f' {atom_name}'
            alt_loc = ''
            insertion_code = ''
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ''
            chain_tag = 'A'
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                f'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                f'{res_name_a:>3} {chain_tag:>1}'
                f'{residue_index[i]:>4}{insertion_code:>1} '
                f'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                f'{occupancy:>6.2f}{b_factor:>6.2f} '
                f'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = 'TER'
            chain_termination_line = (
                f'{chain_end:<6}{atom_index:>5} {res_atoa(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append('END')
    pdb_lines.append('')
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: "Protein") -> np.ndarray:
    """Return the residue-type-based (ideal) atom mask for *prot*.

    Bug fix: the body read `prot` while the parameter had a different name;
    also renamed from a duplicate placeholder.
    """
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def from_prediction(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """Assemble a `Protein` from model features and prediction outputs.

    Bug fix: all seven parameters previously shared one name (a SyntaxError)
    while the body read `features`, `result` and `b_factors`.  B-factors
    default to zeros with the shape of the final atom mask.
    """
    return Protein(
        aatype=features['aatype'],
        atom_positions=result['final_atom_positions'],
        atom_mask=result['final_atom_mask'],
        # PDB residue numbering is 1-based.
        residue_index=features['residue_index'] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result['final_atom_mask']),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 668 | 0 |
"""simple docstring"""
import os
from distutils.util import strtobool
def get_int_from_env(env_keys, default):
    """Return the first non-negative integer found among *env_keys*, else *default*.

    Bug fixes: both parameters previously shared one name (a SyntaxError) and
    all three helpers in this module shared one function name, so only the
    last survived.
    """
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default
def parse_flag_from_env(key, default=False):
    """Read environment variable *key* and interpret it as a boolean flag.

    Bug fixes: the parameters previously shared one name (a SyntaxError), and
    ``distutils.util.strtobool`` was removed in Python 3.12 (PEP 632), so its
    truth-value parsing is reproduced inline.  Raises ValueError on
    unrecognized values, exactly like strtobool did.
    """
    value = os.environ.get(key, str(default)).lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return True
    if value in ("n", "no", "f", "false", "off", "0"):
        return False
    raise ValueError(f"invalid truth value {value!r}")
def parse_choice_from_env(key, default="no"):
    """Return the raw string value of environment variable *key* (or *default*).

    Bug fixes: the parameters previously shared one name (a SyntaxError) and
    the function name collided with the other helpers in this module.
    """
    value = os.environ.get(key, str(default))
    return value
| 682 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
# Bug fix: the path was previously stored under a throwaway name while the
# next line reads `git_repo_path`.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure(config):
    """Register the custom pytest markers used across the test suite.

    Bug fixes: the parameter name did not match the `config` object the body
    reads, the annotations referenced typing names this module never imports
    (NameError at import), and the hook name was lost so pytest never called
    it.
    """
    custom_markers = (
        "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested",
        "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested",
        "is_pipeline_test: mark test to run only when pipelines are tested",
        "is_staging_test: mark test to run only in the staging environment",
        "accelerate_tests: mark test that require accelerate",
        "tool_tests: mark the tool tests that are run on their specific schedule",
    )
    for marker in custom_markers:
        config.addinivalue_line("markers", marker)
def pytest_addoption(parser):
    """Delegate option registration to transformers' shared pytest hook.

    Bug fixes: the annotations referenced typing names this module never
    imports (NameError at import) and the hook name was lost so pytest never
    called it.
    """
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """Emit the extra test reports when --make-reports is given.

    Bug fixes: the body read `terminalreporter` while the parameter had a
    different name, the report id was the reporter object instead of the
    requested report name, and the hook name was lost.
    """
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports')
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
def pytest_sessionfinish(session, exitstatus):
    """Treat "no tests collected" (pytest exit code 5) as a successful run.

    Bug fixes: the two parameters previously shared one name (a SyntaxError)
    and the result was stored in a local instead of `session.exitstatus`.
    """
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
# Bug fixes: the flag, the base-class alias, the checker class and the three
# hook assignments below were all bound to throwaway names (and the method
# had triplicated parameter names), so the custom checker was never installed.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    """Doctest output checker that accepts any output when the example
    carries the IGNORE_RESULT option flag."""

    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
| 668 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class UpperCAmelCase__ ( PipelineTool ):
    """Text-to-speech tool backed by SpeechT5.

    Bug fixes: the base class was an undefined name while `PipelineTool` is
    imported above, every class attribute was named `a` (so each overwrote
    the previous one), the methods all shared one name, `setup` never
    assigned `self.post_processor`, and `encode` read unbound locals.
    """

    default_checkpoint = 'microsoft/speecht5_tts'
    description = (
        'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
        'text to read (in English) and returns a waveform object containing the sound.'
    )
    name = 'text_reader'
    pre_processor_class = SpeechTaProcessor
    model_class = SpeechTaForTextToSpeech
    post_processor_class = SpeechTaHifiGan
    inputs = ['text']
    outputs = ['audio']

    def setup(self):
        """Default the vocoder checkpoint before the generic setup runs."""
        if self.post_processor is None:
            self.post_processor = 'microsoft/speecht5_hifigan'
        super().setup()

    def encode(self, text, speaker_embeddings=None):
        """Tokenize *text*; fetch a default x-vector when none is provided."""
        inputs = self.pre_processor(text=text, return_tensors='pt', truncation=True)
        if speaker_embeddings is None:
            if not is_datasets_available():
                raise ImportError('Datasets needs to be installed if not passing speaker embeddings.')
            embeddings_dataset = load_dataset('Matthijs/cmu-arctic-xvectors', split='validation')
            speaker_embeddings = torch.tensor(embeddings_dataset[7305]['xvector']).unsqueeze(0)
        return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}

    def forward(self, inputs):
        with torch.no_grad():
            return self.model.generate_speech(**inputs)

    def decode(self, outputs):
        with torch.no_grad():
            return self.post_processor(outputs).cpu().detach()
| 493 |
def odd_even_transposition(arr: list) -> list:
    """Sort *arr* in place with odd-even transposition (brick) sort; return it.

    Bug fix: renamed from a placeholder so the __main__ block's call to
    ``odd_even_transposition`` resolves.
    """
    n = len(arr)
    for pass_num in range(n):
        # Even passes compare pairs (0,1),(2,3),...; odd passes (1,2),(3,4),...
        for i in range(pass_num % 2, n - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr
if __name__ == "__main__":
    # Bug fix: the list was previously stored under a throwaway name while
    # the print statement below reads `arr`.
    arr = list(range(10, 0, -1))
    # The f-string formats `arr` before the sort call mutates it, so the
    # "Original" part shows the unsorted list.
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 668 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCamelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the LED slow and fast tokenizers.

    Bug fixes: the mixin base class and the attribute/method names it relies
    on (`tokenizer_class`, `setUp`, `get_tokenizer`, ...) were lost to
    placeholder names, every test method shared one name (so only the last
    survived), and several calls read the undefined name
    `SCREAMING_SNAKE_CASE_`.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges sufficient to tokenize "lower newer".
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            # NOTE(review): the first call must not pad so that `pad` below
            # extends the shorter global_attention_mask with -1 entries.
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 17 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester:
    """Builds tiny DistilBERT configs/inputs and runs per-head shape checks.

    Bug fixes: restored the class name (`DistilBertModelTester(self)` is how
    the test class below instantiates it), the method names the tests call,
    and the local names -- previously everything was bound to throwaway
    identifiers, so e.g. `prepare_config_and_inputs` returned six undefined
    names.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create random ids/masks/labels plus a matching tiny config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DistilBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DistilBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = DistilBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test-suite bindings for the PyTorch DistilBERT models.

    NOTE(review): the original assigned every class attribute and method to a
    single reused placeholder (so earlier ones were shadowed and unittest could
    not discover the tests) and used undefined base-class placeholders. The
    standard ``transformers`` names are restored — confirm this file's imports
    provide ``ModelTesterMixin``, ``PipelineTesterMixin``, ``DistilBertConfig``
    and ``torch_device``.
    """

    all_model_classes = (
        (
            DistilBertModel,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DistilBertModel,
            "fill-mask": DistilBertForMaskedLM,
            "question-answering": DistilBertForQuestionAnswering,
            "text-classification": DistilBertForSequenceClassification,
            "token-classification": DistilBertForTokenClassification,
            "zero-shot": DistilBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Four boolean capability flags (the original had four shadowed `True`s).
    fx_compatible = True
    test_pruning = True
    test_resize_embeddings = True
    test_resize_position_embeddings = True

    def setUp(self):
        self.model_tester = DistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        """Trace on CPU, reload with a device map, and run on the test device."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # BertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == DistilBertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration check against the pretrained distilbert-base-uncased weights."""

    @slow
    def test_inference_no_head_absolute_embedding(self):
        """Forward a fixed sentence and compare a 3x3 slice of the hidden states.

        NOTE(review): the original body read undefined placeholder names and the
        method name was not unittest-discoverable; both are restored.
        """
        model = DistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]]
        )
        # Loose tolerance: values were recorded from a reference run.
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 668 | 0 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class __magic_name__ :
    """Builds tiny ConvBERT configs and inputs for the TF model tests.

    NOTE(review): the original constructor reused one name for every parameter
    (a SyntaxError) and every method was named ``_A`` (shadowing each other);
    parameter and method names are restored from the call sites in the test
    class below. Constructor arguments other than ``parent`` are ignored — the
    original hard-coded these values, which is preserved.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a small ConvBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        # Also exercise the positional-list calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Replicate each input across the choice dimension.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


# Name used by the test class below; keep both bindings for compatibility.
TFConvBertModelTester = __magic_name__
@require_tf
class __magic_name__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common + pipeline test-suite bindings for the TF ConvBERT models.

    NOTE(review): the original assigned every class attribute and method to a
    single reused placeholder (so earlier ones were shadowed and unittest could
    not discover the tests) and used undefined base-class placeholders; the
    standard names are restored. ``TFModelTesterMixin`` / ``PipelineTesterMixin``
    are imported at the top of this file.
    """

    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_saved_model_creation_extended(self):
        """Round-trip every model class through a TF SavedModel and check outputs."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                # ConvBERT halves the effective head count via its head ratio.
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )

    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        """Check attention tensors are produced, shaped and ordered as expected."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class __magic_name__ ( unittest.TestCase ):
    """Slow integration check against the pretrained YituTech/conv-bert-base weights."""

    @slow
    def test_inference_masked_lm(self):
        """Forward fixed ids and compare a 3x3 slice of the hidden states.

        NOTE(review): the original body read undefined placeholder names and the
        method name was not unittest-discoverable; both are restored.
        """
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        # Reference values recorded from a known-good run.
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 72 |
from typing import Any
def lowerCAmelCase_ (
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely hidden-state sequence (Viterbi algorithm).

    Args:
        observations_space: ordered list of observed symbols.
        states_space: list of hidden-state names.
        initial_probabilities: state -> P(state at t=0).
        transition_probabilities: state -> {state -> P(next | current)}.
        emission_probabilities: state -> {observation -> P(obs | state)}.

    Raises:
        ValueError: if any argument is empty or of the wrong shape/type.

    NOTE(review): every helper below originally shared one def name, so the
    names this function calls were undefined; the helper names are restored.
    """
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # probabilities[(state, observation)] = best path probability ending in
    # `state` at that observation; pointers holds the argmax predecessor.
    probabilities: dict = {}
    pointers: dict = {}
    first_observation = observations_space[0]
    for state in states_space:
        probabilities[(state, first_observation)] = (
            initial_probabilities[state] * emission_probabilities[state][first_observation]
        )
        pointers[(state, first_observation)] = None

    # Forward pass: fill in transition probabilities and back-pointers.
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # argmax over predecessor states.
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # Best final state.
    final_observation = observations_space[len(observations_space) - 1]
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Backward pass: follow the pointers and reverse.
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]
    result.reverse()
    return result


def _validation(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Validate all five Viterbi arguments; raise ValueError on any problem."""
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities)


def _validate_not_empty(
    observations_space,
    states_space,
    initial_probabilities,
    transition_probabilities,
    emission_probabilities,
) -> None:
    """Reject any empty argument."""
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space, states_space) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object, var_name: str) -> None:
    """Require a list of strings."""
    if not isinstance(_object, list):
        raise ValueError(f"{var_name} must be a list")
    for x in _object:
        if not isinstance(x, str):
            raise ValueError(f"{var_name} must be a list of strings")


def _validate_dicts(initial_probabilities, transition_probabilities, emission_probabilities) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object, var_name: str) -> None:
    """Require a dict of dicts whose inner values are floats."""
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(_object, var_name: str, value_type: type, nested: bool = False) -> None:
    """Require a dict with string keys and values of ``value_type``."""
    if not isinstance(_object, dict):
        raise ValueError(f"{var_name} must be a dict")
    if not all(isinstance(x, str) for x in _object):
        raise ValueError(f"{var_name} all keys must be strings")
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        raise ValueError(f"{var_name} {nested_text}all values must be {value_type.__name__}")


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 668 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a__: Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( DiffusionPipeline ):
    """Unconditional audio-generation pipeline (Dance Diffusion style).

    NOTE(review): the original was emitted with duplicate parameter names (a
    SyntaxError), an undefined base-class placeholder and undefined locals;
    the signature is reconstructed positionally, so positional callers are
    unaffected.
    """

    def __init__(self, unet, scheduler):
        super().__init__()
        # Registered modules become attributes and are saved/loaded with the pipeline.
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size = 1,
        num_inference_steps = 100,
        generator = None,
        audio_length_in_s = None,
        return_dict = True,
    ):
        """Generate ``batch_size`` audio clips by iterative denoising.

        Returns:
            AudioPipelineOutput of numpy arrays shaped (batch, channels, samples),
            or a 1-tuple ``(audio,)`` when ``return_dict`` is False.
        """
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        # The UNet downsamples by 2 per up-block; inputs must be long enough.
        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}." )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            # Round up to a multiple of the down-scale factor; trim afterwards.
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process." )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators." )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        # Trim the padding added to reach a down-scale-factor multiple.
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
| 190 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
_UpperCAmelCase : Any = logging.get_logger(__name__)
class lowerCAmelCase_ ( SequenceFeatureExtractor ):
    """TVLT audio feature extractor: padded log-mel spectrogram patches + mask.

    NOTE(review): the original was emitted with duplicate parameter names (a
    SyntaxError), an undefined base-class placeholder, and a helper whose name
    did not match its call site; all are restored positionally, so positional
    callers are unaffected.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length = 2048,
        num_channels = 1,
        patch_size = [16, 16],
        feature_size = 128,
        sampling_rate = 44100,
        hop_length_to_sampling_rate = 86,
        n_fft = 2048,
        padding_value = 0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=22050.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Log-mel spectrogram of ``waveform`` rescaled into [-1, 1]."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters.T, log_mel="dB", db_range=80.0, )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech,
        return_tensors = None,
        return_attention_mask = True,
        sampling_rate = None,
        resample = False,
        mask_audio = False,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms into ``BatchFeature``."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
                    f' with {self.sampling_rate} and not {sampling_rate}.' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            # NOTE(review): the original lost this store target; restored so each
            # feature is copied into the padded buffer.
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
| 668 | 0 |
'''simple docstring'''
def lowerCamelCase( SCREAMING_SNAKE_CASE_ = 6008_5147_5143 ) -> int:
    """Return the largest prime factor of the given positive integer.

    Defaults to the Project Euler #3 input (600851475143).

    Raises:
        TypeError: if the argument is not an int and cannot be cast to one.
        ValueError: if the argument is less than one.
    """
    try:
        n = int(SCREAMING_SNAKE_CASE_)
    except (TypeError, ValueError):
        raise TypeError('Parameter n must be int or castable to int.')
    if n <= 0:
        raise ValueError('Parameter n must be greater than or equal to one.')
    largest = 1
    factor = 2
    # Trial division: each factor is stripped completely before moving on, so
    # any `factor` that divides n here is prime.
    while factor * factor <= n:
        while n % factor == 0:
            largest = factor
            n //= factor
        factor += 1
    if n > 1:
        # The remaining cofactor exceeds sqrt(original n) and is itself prime.
        largest = n
    return int(largest)


if __name__ == "__main__":
    # The original guard called an undefined name `solution`; call the function
    # actually defined in this module.
    print(f"{lowerCamelCase() = }")
| 366 |
from collections import namedtuple

# (from_, to) multipliers: `value * from_` converts the value to cubic metres,
# and `* to` converts cubic metres into the target unit.
from_to = namedtuple("from_to", "from_ to")

METRIC_CONVERSION = {
    "cubicmeter": from_to(1, 1),
    "litre": from_to(0.001, 1_000),
    "kilolitre": from_to(1, 1),
    "gallon": from_to(0.00454, 264.172),
    "cubicyard": from_to(0.76455, 1.30795),
    "cubicfoot": from_to(0.028, 35.3147),
    "cup": from_to(0.000236588, 4226.75),
}

# Backwards-compatible binding for the previous (mangled) module-level name,
# which ended up pointing at the conversion table.
_UpperCAmelCase = METRIC_CONVERSION


def lowerCAmelCase_ (value: float, from_type: str, to_type: str) -> float:
    """Convert ``value`` between the volume units listed in METRIC_CONVERSION.

    NOTE(review): the original signature reused one name for all three
    parameters (a SyntaxError); they are renamed positionally to the names the
    body already used, so positional callers are unaffected.

    Raises:
        ValueError: if either unit name is not a supported key.
    """
    if from_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
            + ', '.join(METRIC_CONVERSION) )
    if to_type not in METRIC_CONVERSION:
        raise ValueError(
            f'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
            + ', '.join(METRIC_CONVERSION) )
    return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 0 |
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    """Builds tiny MPNet configs plus random inputs and checks the output
    shapes of the base model and each task head.

    NOTE(review): the original used a single mangled name for every parameter
    (a SyntaxError) and for both test classes; names are restored to the
    transformers test convention — confirm against upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def get_large_model_config(self):
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MPNetConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

    def create_and_check_mpnet_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_mpnet_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_mpnet_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_mpnet_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Duplicate each example across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_mpnet_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): the original bound these two flags to mangled names; the
    # values (False, True) are kept — confirm the attribute names upstream.
    test_pruning = False
    test_resize_embeddings = True

    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mpnet_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)


@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0_550, 0.1_943, -0.0_740], [-0.0_562, 0.2_211, -0.0_579], [-0.0_437, 0.3_337, -0.0_641]]]
        )
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 633 |
def lowerCAmelCase_ (lowercase__ : list ) -> list:
    """Sort *lowercase__* in place with binary insertion sort and return it.

    For each element, a binary search locates the insertion point inside the
    already-sorted prefix, then that prefix's tail is shifted one slot right.

    Bug fix: the original iterated ``range(1, <list>)`` (a TypeError), shifted
    elements into throwaway locals instead of the list, and never stored the
    value at its insertion point.
    """
    num_items = len(lowercase__)
    for i in range(1, num_items):
        value = lowercase__[i]
        low = 0
        high = i - 1
        # Binary-search the sorted prefix [0, i) for the insertion point.
        while low <= high:
            mid = (low + high) // 2
            if value < lowercase__[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift [low, i) right by one, then drop the value into slot ``low``.
        for j in range(i, low, -1):
            lowercase__[j] = lowercase__[j - 1]
        lowercase__[low] = value
    return lowercase__


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(lowerCAmelCase_(unsorted))
| 668 | 0 |
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# Bug fix: the original rebound every module constant (and the logger) to one
# mangled name, leaving VOCAB_FILES_NAMES / logger etc. undefined.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class __lowerCAmelCase(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: every character of the input
    text is one token, looked up in ``vocab.json``.

    NOTE(review): the original inherited from an undefined ``snake_case__``
    and used one mangled name for all ``__init__`` parameters (a SyntaxError);
    the base class and framework hook names are restored — confirm upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping: id -> character.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text: str):
        # One token per character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token: str):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        with open(vocab_file, 'w', encoding='utf-8') as f:
            # NOTE(review): sort_keys/ensure_ascii were undefined names in the
            # original; True/False follow the upstream tokenizer — confirm.
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        return (vocab_file,)
| 9 |
def lowerCAmelCase_(input_string: str, pattern: str) -> bool:
    """Return True if *input_string* matches *pattern*, supporting ``.``
    (any single character) and ``*`` (zero or more of the preceding element).

    Bottom-up dynamic programming: ``dp[i][j]`` is 1 iff the first ``i``
    characters of the string match the first ``j`` pattern characters.

    Bug fix: the original declared both parameters with one mangled name (a
    SyntaxError) and rebound a local instead of writing ``dp[i][j]``.
    """
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1

    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.  "dp" stands for dynamic programming.
    dp = [[0] * len_pattern for _ in range(len_string)]

    # since string of zero length match pattern of zero length
    dp[0][0] = 1

    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0

    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == '*' else 0

    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    # zero occurrences of the starred element
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    # one more occurrence of the starred element
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0

    return bool(dp[-1][-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # inputing the strings
    # input_string = input("input a string :")
    # pattern = input("input a pattern :")

    input_string = "aab"
    pattern = "c*a*b"

    # using function to check whether given string matches the given pattern
    if lowerCAmelCase_(input_string, pattern):
        print(f'''{input_string} matches the given pattern {pattern}''')
    else:
        print(f'''{input_string} does not match with the given pattern {pattern}''')
| 668 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: always-importable submodules here, optional
# backends appended below.  Bug fix: the original rebound this dict (and
# every conditional addition) to a throwaway name and then passed the
# undefined ``_import_structure`` to _LazyModule.
_import_structure = {
    "configuration_longformer": [
        "LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LongformerConfig",
        "LongformerOnnxConfig",
    ],
    "tokenization_longformer": ["LongformerTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longformer"] = [
        "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongformerForMaskedLM",
        "LongformerForMultipleChoice",
        "LongformerForQuestionAnswering",
        "LongformerForSequenceClassification",
        "LongformerForTokenClassification",
        "LongformerModel",
        "LongformerPreTrainedModel",
        "LongformerSelfAttention",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_longformer"] = [
        "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLongformerForMaskedLM",
        "TFLongformerForMultipleChoice",
        "TFLongformerForQuestionAnswering",
        "TFLongformerForSequenceClassification",
        "TFLongformerForTokenClassification",
        "TFLongformerModel",
        "TFLongformerPreTrainedModel",
        "TFLongformerSelfAttention",
    ]


if TYPE_CHECKING:
    from .configuration_longformer import (
        LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LongformerConfig,
        LongformerOnnxConfig,
    )
    from .tokenization_longformer import LongformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_longformer_fast import LongformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longformer import (
            LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongformerForMaskedLM,
            LongformerForMultipleChoice,
            LongformerForQuestionAnswering,
            LongformerForSequenceClassification,
            LongformerForTokenClassification,
            LongformerModel,
            LongformerPreTrainedModel,
            LongformerSelfAttention,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_longformer import (
            TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLongformerForMaskedLM,
            TFLongformerForMultipleChoice,
            TFLongformerForQuestionAnswering,
            TFLongformerForSequenceClassification,
            TFLongformerForTokenClassification,
            TFLongformerModel,
            TFLongformerPreTrainedModel,
            TFLongformerSelfAttention,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 159 |
import json
import os
from typing import Optional, Tuple

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


# Bug fix: the original rebound all four module constants to one mangled
# name, leaving logger / VOCAB_FILES_NAMES etc. undefined for the class.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}


class lowerCAmelCase_(PreTrainedTokenizer):
    """Character-level tokenizer for MGP-STR: one token per input character.

    NOTE(review): the original inherited from an undefined ``snake_case__``
    and declared every ``__init__`` parameter with the same mangled name (a
    SyntaxError); base class and hook names are restored — confirm upstream.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.vocab = json.load(vocab_handle)
        # Reverse mapping: id -> character.
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text: str):
        # One token per character.
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token: str):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error('''Vocabulary path ({}) should be a directory'''.format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            # NOTE(review): sort_keys/ensure_ascii were undefined names in the
            # original; True/False follow the upstream tokenizer — confirm.
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        return (vocab_file,)
| 668 | 0 |
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin


enable_full_determinism()


class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    """Shape/signature/output tests for PriorTransformer on a tiny config.

    NOTE(review): the original inherited from an undefined ``snake_case__``
    and gave every method the same mangled name (so all but the last were
    shadowed); names follow the ModelTesterMixin convention — confirm.
    """

    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input_dict = self.get_dummy_seed_input()
        with torch.no_grad():
            output = model(**input_dict)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1E-2))
@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint.

    NOTE(review): the original declared ``get_dummy_seed_input`` with four
    identically mangled parameter names (a SyntaxError); names are restored
    from the values the body uses — confirm against the upstream test file.
    """

    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # Release GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input_dict = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input_dict)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1E-3)
| 188 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: always-importable submodules here, optional
# backends appended below.  Bug fix: the original rebound this dict (and
# every conditional addition) to a throwaway name and then passed the
# undefined ``_import_structure`` to _LazyModule.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 668 | 0 |
import logging
from pathlib import Path

import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only

from utils_rag import save_json


def count_trainable_parameters(model):
    """Return the total number of trainable scalars in *model*.

    Bug fix: the original named all three module functions identically (so
    only the last survived) and the lambda parameter did not match the name
    used in its body.
    """
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params


logger = logging.getLogger(__name__)


def get_checkpoint_callback(output_dir, metric):
    """Build a ModelCheckpoint that saves the best model by validation *metric*.

    Raises:
        NotImplementedError: for metrics other than rouge2/bleu/em/loss.
    """
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"
            " function."
        )

    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback


def get_early_stopping_callback(metric, patience):
    """Build an EarlyStopping callback watching the validation *metric*."""
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        # NOTE(review): ``verbose`` was an undefined name in the original;
        # True follows the upstream examples file — confirm.
        verbose=True,
    )


class Seq2SeqLoggingCallback(pl.Callback):
    """Lightning callback that logs LRs, parameter counts, and writes
    test/validation results + generations to the output directory."""

    def on_batch_end(self, trainer, pl_module):
        # Log the current learning rate of every optimizer param group.
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(
        self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True
    ) -> None:
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)

        if not save_generations:
            return

        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()

        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 699 |
from collections import deque
class lowerCAmelCase_ :
    """One job in an MLFQ scheduling simulation.

    Bug fix: the original declared all three ``__init__`` parameters with the
    same mangled name (a SyntaxError) and never stored the values on ``self``,
    though the scheduler reads ``process_name``/``arrival_time``/``stop_time``/
    ``burst_time``/``waiting_time``/``turnaround_time`` as attributes.
    """

    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class lowerCAmelCase_ :
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : list[int] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int , ):
# total number of mlfq's queues
lowerCAmelCase__ = number_of_queues
# time slice of queues that round robin algorithm applied
lowerCAmelCase__ = time_slices
# unfinished process is in this ready_queue
lowerCAmelCase__ = queue
# current time
lowerCAmelCase__ = current_time
# finished process is in this sequence queue
lowerCAmelCase__ = deque()
def __snake_case ( self : Tuple ):
lowerCAmelCase__ = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def __snake_case ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : list[Process] ):
lowerCAmelCase__ = []
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def __snake_case ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : deque[Process] ):
return [q.burst_time for q in queue]
def __snake_case ( self : Tuple , SCREAMING_SNAKE_CASE_ : Process ):
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def __snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : deque[Process] ):
lowerCAmelCase__ = deque() # sequence deque of finished process
while len(SCREAMING_SNAKE_CASE_ ) != 0:
lowerCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
lowerCAmelCase__ = 0
# set the process's turnaround time because it is finished
lowerCAmelCase__ = self.current_time - cp.arrival_time
# set the completion time
lowerCAmelCase__ = self.current_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def __snake_case ( self : List[str] , SCREAMING_SNAKE_CASE_ : deque[Process] , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase__ = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(SCREAMING_SNAKE_CASE_ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
lowerCAmelCase__ = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(SCREAMING_SNAKE_CASE_ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
lowerCAmelCase__ = 0
# set the finish time
lowerCAmelCase__ = self.current_time
# update the process' turnaround time because it is finished
lowerCAmelCase__ = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(SCREAMING_SNAKE_CASE_ )
self.finish_queue.extend(SCREAMING_SNAKE_CASE_ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def __snake_case ( self : int ):
# all queues except last one have round_robin algorithm
for i in range(self.number_of_queues - 1 ):
lowerCAmelCase__ , lowerCAmelCase__ = self.round_robin(
self.ready_queue , self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
if __name__ == "__main__":
    import doctest

    # Example workload; the process names P1..P4 were mangled into a single
    # reused variable in the original.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    # One time slice per round-robin level is required.
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    # Re-create the processes because the doctest run mutates them.
    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
| 668 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path to the tiny SentencePiece fixture model used by this test-suite.
a__ : List[Any] = get_tests_dir('''fixtures/test_sentencepiece.model''')


@require_sentencepiece
class UpperCamelCase_ ( snake_case__ , unittest.TestCase):
    """Tokenization tests for the XLMProphetNet SentencePiece tokenizer.

    NOTE(review): identifiers in this block look machine-mangled — several
    names used below (`SCREAMING_SNAKE_CASE_`, `tokenizer`, `vocab_keys`)
    are never bound in their scope.  Confirm against the upstream test
    module before relying on this code.
    """

    # Tokenizer class under test and mixin configuration flags.
    snake_case__ : str = XLMProphetNetTokenizer
    snake_case__ : Dict = False
    snake_case__ : Union[str, Any] = True

    def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
        # Build a tokenizer from the fixture and save it so the common test
        # mixin can reload it from `self.tmpdirname`.
        super().setUp()

        # We have a SentencePiece fixture for testing
        __SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )
        tokenizer.save_pretrained(self.tmpdirname )

    def UpperCAmelCase_ ( self : Optional[Any] ) -> List[str]:
        # "[PAD]" is expected to map to id 0 and back.
        __SCREAMING_SNAKE_CASE = "[PAD]"
        __SCREAMING_SNAKE_CASE = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )

    def UpperCAmelCase_ ( self : int ) -> Any:
        # Spot-check the first, second and last vocabulary entries and size.
        __SCREAMING_SNAKE_CASE = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "[PAD]" )
        self.assertEqual(vocab_keys[1] , "[CLS]" )
        self.assertEqual(vocab_keys[-1] , "j" )
        self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 1_0_1_2 )

    def UpperCAmelCase_ ( self : Tuple ) -> List[str]:
        # The fixture vocabulary holds 1012 entries.
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )

    def UpperCAmelCase_ ( self : int ) -> List[str]:
        # Full round-trip: tokenize -> ids -> tokens, with accent handling
        # (unknown pieces come back as "[UNK]").
        __SCREAMING_SNAKE_CASE = XLMProphetNetTokenizer(SCREAMING_SNAKE_CASE_ , keep_accents=SCREAMING_SNAKE_CASE_ )

        __SCREAMING_SNAKE_CASE = tokenizer.tokenize("This is a test" )
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )

        __SCREAMING_SNAKE_CASE = tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        __SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                value + tokenizer.fairseq_offset
                for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
            ] , )

        __SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
        self.assertListEqual(
            SCREAMING_SNAKE_CASE_ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "[UNK]",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "[UNK]",
                ".",
            ] , )

    @cached_property
    def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
        # Lazily load the full pretrained tokenizer for the slow tests below.
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased" )

    @slow
    def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
        # A short sentence and its exact expected ids from the big tokenizer.
        __SCREAMING_SNAKE_CASE = "Hello World!"
        __SCREAMING_SNAKE_CASE = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
        self.assertListEqual(SCREAMING_SNAKE_CASE_ , self.big_tokenizer.encode(SCREAMING_SNAKE_CASE_ ) )

    @slow
    def UpperCAmelCase_ ( self : Union[str, Any] ) -> Any:
        # Integration test against a pinned revision of the hosted model.
        # fmt: off
        __SCREAMING_SNAKE_CASE = {"input_ids": [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
        1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_ , model_name="microsoft/xprophetnet-large-wiki100-cased" , revision="1acad1643ddd54a44df6a1b797ada8373685d90e" , )
| 682 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
# NOTE(review): this constant was mangled; upstream it is likely an
# os.environ assignment — confirm before relying on it.
_UpperCAmelCase : Tuple = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    """Build a regression model, a DDP-prepared copy, and a dataloader.

    Returns (model, ddp_model, dataloader); the plain model is moved to the
    accelerator device, the copy and dataloader are `accelerator.prepare`d.
    """
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    """Build an evaluation dataloader over the GLUE/MRPC validation split.

    When `use_longest` is True, batches are padded to the longest sample;
    otherwise to a fixed max length of 128.
    """
    tokenizer = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''')
    dataset = load_dataset('''glue''', '''mrpc''', split='''validation''')

    def tokenize_function(examples):
        # NOTE(review): the truncation/max_length values were mangled;
        # restored as truncation=True, max_length=None — confirm upstream.
        outputs = tokenizer(examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=['''idx''', '''sentence1''', '''sentence2'''], )
        tokenized_datasets = tokenized_datasets.rename_column('''label''', '''labels''')

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''max_length''', max_length=1_28, return_tensors='''pt''')

    # Evaluation data is never shuffled.
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    """Build baseline and DDP-prepared MRPC models/dataloaders.

    Returns ({"ddp": [...], "no": [...]}, accelerator) where each entry is
    [model, dataloader, device].
    """
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        '''hf-internal-testing/mrpc-bert-base-cased''', return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    """Run `model` over `dataloader` and gather all logits and targets.

    Each batch must yield (input, target) via `.values()`; results are
    gathered through `accelerator.gather_for_metrics` and concatenated.
    """
    logits_and_targets = []
    for batch in dataloader:
        inp, target = batch.values()
        with torch.no_grad():
            logit = model(inp)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    """Check that gather_for_metrics returns exactly `num_samples` items."""
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f'Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}'
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    """Check that distributed metric gathering matches a single-process baseline."""
    metric = evaluate.load('''glue''', '''mrpc''')
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup['''no''']
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch['''labels'''])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup['''ddp''']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch['''labels''']
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f'Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'
def main():
    """Run the full gather_for_metrics test-suite across batching modes."""
    # NOTE(review): the initial Accelerator kwargs were mangled; restored
    # as split_batches=False, dispatch_batches=None — confirm upstream.
    accelerator = Accelerator(split_batches=False, dispatch_batches=None)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('''**Testing gather_for_metrics**''')
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`')
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test torch metrics**''')
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f'With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99')
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('''**Test last batch is not dropped when perfectly divisible**''')
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 5_12)
    accelerator.state._reset_state()
def _mp_fn(index):
    # Entry point for xla_spawn (TPUs); the process index is unused.
    # NOTE(review): name restored from the usual accelerate/transformers
    # convention — confirm upstream.
    main()
# Script entry point when launched directly (e.g. via `accelerate launch`).
if __name__ == "__main__":
    main()
| 668 | 0 |
# Universal gas constant R, referenced by both ideal-gas functions below.
UNIVERSAL_GAS_CONSTANT = 8.314462  # Unit - J mol-1 K-1


def pressure_of_gas_system(moles: float, kelvin: float, volume: float) -> float:
    """Return the pressure P = nRT / V of an ideal gas system.

    Raises:
        ValueError: if any argument is negative.
    """
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system(moles: float, kelvin: float, pressure: float) -> float:
    """Return the volume V = nRT / P of an ideal gas system.

    Raises:
        ValueError: if any argument is negative.
    """
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('''Invalid inputs. Enter positive value.''')
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
# Run the embedded doctests when executed as a script.
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 493 |
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; used by the tokenizer below (`logger.error`, `logger.warning`).
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory
# (referenced by the class below as VOCAB_FILES_NAMES["vocab_file"], ...).
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

# Download locations for the pretrained CTRL vocabulary and merges files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

# Maximum input sizes of the pretrained positional embeddings.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# CTRL "control code" prompt tokens and their vocabulary ids.
CONTROL_CODES = {
    "Pregnancy": 168_629,
    "Christianity": 7_675,
    "Explain": 106_423,
    "Fitness": 63_440,
    "Saving": 63_163,
    "Ask": 27_171,
    "Ass": 95_985,
    "Joke": 163_509,
    "Questions": 45_622,
    "Thoughts": 49_605,
    "Retail": 52_342,
    "Feminism": 164_338,
    "Writing": 11_992,
    "Atheism": 192_263,
    "Netflix": 48_616,
    "Computing": 39_639,
    "Opinion": 43_213,
    "Alone": 44_967,
    "Funny": 58_917,
    "Gaming": 40_358,
    "Human": 4_088,
    "India": 1_331,
    "Joker": 77_138,
    "Diet": 36_206,
    "Legal": 11_859,
    "Norman": 4_939,
    "Tip": 72_689,
    "Weight": 52_343,
    "Movies": 46_273,
    "Running": 23_425,
    "Science": 2_090,
    "Horror": 37_793,
    "Confession": 60_572,
    "Finance": 12_250,
    "Politics": 16_360,
    "Scary": 191_985,
    "Support": 12_654,
    "Technologies": 32_516,
    "Teenage": 66_160,
    "Event": 32_769,
    "Learned": 67_460,
    "Notion": 182_770,
    "Wikipedia": 37_583,
    "Books": 6_665,
    "Extract": 76_050,
    "Confessions": 102_701,
    "Conspiracy": 75_932,
    "Links": 63_674,
    "Narcissus": 150_425,
    "Relationship": 54_766,
    "Relationships": 134_796,
    "Reviews": 41_671,
    "News": 4_256,
    "Translation": 26_820,
    "multilingual": 128_406,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (strings of variable length); the
    result contains one (prev, next) tuple per adjacent pair.  The name is
    restored from the `get_pairs(...)` call in the tokenizer below.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class lowerCAmelCase_(PreTrainedTokenizer):
    """Byte-pair-encoding tokenizer for Salesforce CTRL.

    BPE merges are applied per whitespace-separated word; sub-word pieces
    are joined with the literal marker "@@ " and the end-of-word marker
    "</w>" is stripped before returning.

    NOTE(review): the original block was machine-mangled (duplicate
    parameter names — a SyntaxError — and every method named the same);
    names are restored from the visible call sites (`self.bpe`,
    `self.cache`, `save_directory`, `kv`, the `PreTrainedTokenizer`
    import) — confirm against the upstream CTRL tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        # id -> token lookup, inverse of the encoder
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        # merge pair -> priority (lower rank merges first)
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # word -> already-computed BPE string
        self.cache = {}

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as a dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair encoding to a single word, caching the result."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the highest-priority (lowest-rank) adjacent pair
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '''@@ '''.join(word)
        word = word[:-4]  # drop the trailing "</w>" marker
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-word pieces."""
        split_tokens = []

        words = re.findall(R'''\S+\n?''', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id to a token (str), falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join sub-word pieces back into a plain string."""
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # merges are written in rank order; a gap in the indices means
            # the tokenizer's merge table is corrupted
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
| 668 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the installed torch package.
torch_version = parse(importlib.metadata.version('''torch'''))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's installed version (or a Version) to a requirement.

    `operation` must be a key of STR_OPERATION_TO_FUNC (e.g. ">=", "<").
    When `library_or_version` is a string it is treated as an installed
    package name and its version is looked up.

    Raises:
        ValueError: if `operation` is not a supported comparison.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"""`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}""")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against `version` with `operation`."""
    return compare_versions(torch_version, operation, version)
| 17 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class lowerCAmelCase_:
    """Abstract base class for generation streamers.

    Subclasses receive tokens through `put` and a completion signal through
    `end`.  NOTE(review): the original gave both methods the same mangled
    name, so the second shadowed the first; names restored to the
    transformers streamer API — confirm upstream.
    """

    def put(self, value):
        """Receive a new batch of token ids pushed by generation."""
        raise NotImplementedError()

    def end(self):
        """Signal that generation has finished."""
        raise NotImplementedError()
class lowerCAmelCase_(snake_case__):
    """Streamer that prints decoded text to stdout as tokens arrive.

    NOTE(review): every method in the original shared one mangled name;
    names are restored from the internal calls (`self.on_finalized_text`,
    `self._is_chinese_char`) and the transformers streamer API.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True

    def put(self, value):
        """Receive new token ids, decode them, and emit finished text."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError('''TextStreamer only supports batch size 1''')
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith('''\n'''):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(''' ''') + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)

    def end(self):
        """Flush any remaining cached text and signal end of stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ''''''

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Print finalized text; add a newline only at the end of the stream."""
        print(text, flush=True, end='''''' if not stream_end else None)

    def _is_chinese_char(self, cp):
        """Return True if the codepoint lies in the CJK Unicode block."""
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0x4e00 and cp <= 0x9fff)
            or (cp >= 0x3400 and cp <= 0x4dbf)  #
            or (cp >= 0x2_0000 and cp <= 0x2_a6df)  #
            or (cp >= 0x2_a700 and cp <= 0x2_b73f)  #
            or (cp >= 0x2_b740 and cp <= 0x2_b81f)  #
            or (cp >= 0x2_b820 and cp <= 0x2_ceaf)  #
            or (cp >= 0xf900 and cp <= 0xfaff)
            or (cp >= 0x2_f800 and cp <= 0x2_fa1f)  #
        ):  #
            return True

        return False
class lowerCAmelCase_(snake_case__):
    """Streamer that exposes finalized text through a blocking iterator.

    Text chunks are pushed into a queue by `on_finalized_text` and consumed
    via iteration; a `stop_signal` sentinel ends the iteration.
    """

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Queue the finalized text; queue the stop sentinel at stream end."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 668 | 0 |
'''simple docstring'''
from collections import Counter
from timeit import timeit
def UpperCamelCase ( input_str : str = "" , ) -> bool:
    """Return True if *input_str* can be rearranged into a palindrome.

    A string can be rearranged into a palindrome iff at most one character
    occurs an odd number of times. Spaces are ignored and the comparison is
    case-insensitive. (The original body referenced ``input_str`` while the
    parameter was declared ``lowercase_`` — a NameError on every call.)
    """
    return sum(c % 2 for c in Counter(input_str.replace(''' ''' , '''''' ).lower() ).values() ) < 2
def UpperCamelCase ( input_str : str = "" ) -> bool:
    """Return True if *input_str* can be rearranged into a palindrome.

    Manual frequency-dict variant of the Counter version above (note: both
    share the name ``UpperCamelCase`` at module level, so this definition
    shadows the previous one). Spaces are ignored, comparison is
    case-insensitive. (The original body referenced the undefined names
    ``lowercase__``/``input_str`` and collapsed every local to one name;
    restored from the surviving identifier reads.)
    """
    if len(input_str ) == 0:
        return True
    lower_case_input_str = input_str.replace(''' ''' , '''''' ).lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict = {}
    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character , 0 ) + 1
    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    # More than one character with an odd count cannot pair up around a center.
    if odd_char > 1:
        return False
    return True
def UpperCamelCase ( lowercase_ : str = "" ) -> None:
    # NOTE(review): this benchmark helper is broken as-is. The body references
    # ``lowercase__`` (undefined — the parameter is ``lowercase_``) and calls
    # ``can_string_be_rearranged_as_palindrome_counter`` /
    # ``can_string_be_rearranged_as_palindrome``, which do not exist under
    # those names in this module (both checkers were renamed to
    # ``UpperCamelCase`` and shadow each other). The ``timeit`` setup strings
    # likewise reference module attributes (``z.check_str``) that are never
    # defined. Restore the original function names before using this.
    '''simple docstring'''
    print('''\nFor string = ''' , lowercase__ , ''':''' )
    print(
        '''> can_string_be_rearranged_as_palindrome_counter()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome_counter(lowercase__ ) , '''\ttime =''' , timeit(
        '''z.can_string_be_rearranged_as_palindrome_counter(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
    print(
        '''> can_string_be_rearranged_as_palindrome()''' , '''\tans =''' , can_string_be_rearranged_as_palindrome(lowercase__ ) , '''\ttime =''' , timeit(
        '''z.can_string_be_rearranged_as_palindrome(z.check_str)''' , setup='''import __main__ as z''' , ) , '''seconds''' , )
if __name__ == "__main__":
    # NOTE(review): broken entry point — the input is bound to
    # ``_UpperCAmelCase`` but the calls below reference ``check_str``,
    # ``benchmark`` and ``status``, none of which are ever defined under those
    # names in this module (the benchmark function above was renamed to
    # ``UpperCamelCase``). Running this raises NameError immediately.
    _UpperCAmelCase : Tuple = input(
        '''Enter string to determine if it can be rearranged as a palindrome or not: '''
    ).strip()
    benchmark(check_str)
    _UpperCAmelCase : Optional[int] = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(F"""{check_str} can {'' if status else 'not '}be rearranged as a palindrome""")
| 72 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Import-structure mapping consumed by _LazyModule: submodule name -> symbols.
_UpperCAmelCase : Union[str, Any] = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling symbols. The original
    # rebound the whole dict to this list (clobbering the configuration
    # entry); the list belongs under its own submodule key.
    _UpperCAmelCase["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so heavy dependencies
    # load on first attribute access. The original assigned the proxy to a
    # throwaway name and passed the undefined ``_import_structure``.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _UpperCAmelCase)
| 668 | 0 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def UpperCamelCase__( UpperCamelCase__ : Optional[int] )->int:
    """Factory used by argparse ``set_defaults(func=...)``: build the env command.

    (The original returned ``EnvironmentCommand()``, a name that does not
    exist in this module — the command class below was renamed to
    ``SCREAMING_SNAKE_CASE__``.)
    """
    return SCREAMING_SNAKE_CASE__()
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
    """CLI command that collects and prints environment/version information
    for bug reports.

    NOTE(review): all three methods were renamed to ``UpperCamelCase`` by
    whatever generated this file, so only the last definition survives as the
    ``UpperCamelCase`` class attribute (the run method is unreachable by
    name). Parameter names and locals below are restored from each body's
    undefined-name references; a ``format_dict`` alias is added so the run
    method's ``self.format_dict`` call can resolve.
    """

    @staticmethod
    def UpperCamelCase ( parser ):
        # register_subcommand: attach the ``env`` sub-command to the CLI parser.
        # (Parameter renamed from ``__lowerCamelCase`` — the body referenced the
        # undefined names ``parser`` and ``download_parser``; ``func`` pointed at
        # the undefined ``SCREAMING_SNAKE_CASE_`` and now uses the module-level
        # factory defined above.)
        download_parser = parser.add_parser('''env''' )
        download_parser.set_defaults(func=UpperCamelCase__ )

    def UpperCamelCase ( self ):
        # run: gather versions of the installed ML stack, print a report
        # template, and return the collected info dict. (Shadowed by the next
        # definition — see the class note.)
        hub_version = huggingface_hub.__version__
        pt_version = '''not installed'''
        pt_cuda_available = '''NA'''
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = '''not installed'''
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = '''not installed'''
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = '''not installed'''
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '''`diffusers` version''': version,
            '''Platform''': platform.platform(),
            '''Python version''': platform.python_version(),
            '''PyTorch version (GPU?)''': f"{pt_version} ({pt_cuda_available})",
            '''Huggingface_hub version''': hub_version,
            '''Transformers version''': transformers_version,
            '''Accelerate version''': accelerate_version,
            '''xFormers version''': xformers_version,
            '''Using GPU in script?''': '''<fill in>''',
            '''Using distributed or parallel set-up in script?''': '''<fill in>''',
        }
        print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
        print(self.format_dict(info ) )
        return info

    @staticmethod
    def UpperCamelCase ( d ):
        # format_dict: render a mapping as "- key: value" lines, one per entry.
        # (Parameter renamed from ``__lowerCamelCase`` — the body referenced the
        # undefined name ``d``.)
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()] ) + "\n"

    # Alias so the run method's ``self.format_dict`` call resolves to the
    # formatter just defined (backward-compatible addition).
    format_dict = UpperCamelCase
| 190 |
from __future__ import annotations
def lowerCAmelCase_ (value : list[int] , weight : list[int] , capacity : int ) -> tuple[float, list[float]]:
    """Solve the fractional knapsack problem greedily.

    Items are taken whole in decreasing value/weight ratio until the next
    item no longer fits; a fraction of that item then tops off the sack.

    Args:
        value: profit of each item.
        weight: weight of each item (parallel to ``value``).
        capacity: maximum total weight the sack can hold.

    Returns:
        ``(max_value, fractions)`` — the optimal total value, and per item the
        fraction taken (0, 1, or a ratio in between).

    NOTE(review): the original declared all three parameters with the same
    name (a SyntaxError) and the sort key referenced names that were never
    bound; reconstructed from the body's reads.
    """
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    # Greedy order: best value-per-weight first.
    index.sort(key=lambda i : ratio[i] , reverse=True )
    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            # Partial item: take whatever still fits, then stop.
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 668 | 0 |
'''simple docstring'''
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
    # NOTE(review): broken by machine renaming — the duplicated parameter
    # names are a SyntaxError and the body references the undefined originals
    # (``lowercase__``, ``dataset``, ``expected_features``). Upstream this is
    # the ``_check_text_dataset(dataset, expected_features)`` helper.
    assert isinstance(lowercase__ ,lowercase__ )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), every
    # local collapsed to ``A_``, and the body references the undefined
    # originals (``tmp_path``, ``keep_in_memory``, ``lowercase__``) and the
    # renamed checker ``_check_text_dataset``.
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A_ = TextDatasetReader(lowercase__ ,cache_dir=lowercase__ ,keep_in_memory=lowercase__ ).read()
    _check_text_dataset(lowercase__ ,lowercase__ )
@pytest.mark.parametrize(
    'features' ,[
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ] ,)
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``tmp_path``, ``features``,
    # ``default_expected_features``, ``lowercase__``, ``_check_text_dataset``).
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    A_ = features.copy() if features else default_expected_features
    A_ = (
        Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A_ = TextDatasetReader(lowercase__ ,features=lowercase__ ,cache_dir=lowercase__ ).read()
    _check_text_dataset(lowercase__ ,lowercase__ )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Dict:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``tmp_path``, ``split``,
    # ``lowercase__``, ``dataset``, ``_check_text_dataset``).
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    A_ = TextDatasetReader(lowercase__ ,cache_dir=lowercase__ ,split=lowercase__ ).read()
    _check_text_dataset(lowercase__ ,lowercase__ )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' ,[str, list] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``text_path``, ``tmp_path``,
    # ``lowercase__``, ``_check_text_dataset``).
    if issubclass(lowercase__ ,lowercase__ ):
        A_ = text_path
    elif issubclass(lowercase__ ,lowercase__ ):
        A_ = [text_path]
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    A_ = TextDatasetReader(lowercase__ ,cache_dir=lowercase__ ).read()
    _check_text_dataset(lowercase__ ,lowercase__ )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=("train",) ) -> Tuple:
    # NOTE(review): broken — duplicate parameter names (SyntaxError) and
    # undefined references (``lowercase__``, ``splits``, ``dataset_dict``,
    # ``expected_features``). Upstream this is
    # ``_check_text_datasetdict(dataset_dict, expected_features, splits)``.
    assert isinstance(lowercase__ ,lowercase__ )
    for split in splits:
        A_ = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> int:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``tmp_path``,
    # ``keep_in_memory``, ``text_path``, ``lowercase__``,
    # ``_check_text_datasetdict``).
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        A_ = TextDatasetReader({'train': text_path} ,cache_dir=lowercase__ ,keep_in_memory=lowercase__ ).read()
    _check_text_datasetdict(lowercase__ ,lowercase__ )
@pytest.mark.parametrize(
    'features' ,[
        None,
        {'text': 'string'},
        {'text': 'int32'},
        {'text': 'float32'},
    ] ,)
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> Any:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``tmp_path``, ``features``,
    # ``default_expected_features``, ``text_path``, ``lowercase__``,
    # ``_check_text_datasetdict``).
    A_ = tmp_path / 'cache'
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    A_ = {'text': 'string'}
    A_ = features.copy() if features else default_expected_features
    A_ = (
        Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    A_ = TextDatasetReader({'train': text_path} ,features=lowercase__ ,cache_dir=lowercase__ ).read()
    _check_text_datasetdict(lowercase__ ,lowercase__ )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def lowerCamelCase( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ) -> str:
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``A_``, undefined references (``split``, ``text_path``,
    # ``tmp_path``, ``lowercase__``, ``path``, ``_check_text_datasetdict``,
    # ``dataset``).
    if split:
        A_ = {split: text_path}
    else:
        A_ = 'train'
        A_ = {'train': text_path, 'test': text_path}
    A_ = tmp_path / 'cache'
    A_ = {'text': 'string'}
    A_ = TextDatasetReader(lowercase__ ,cache_dir=lowercase__ ).read()
    _check_text_datasetdict(lowercase__ ,lowercase__ ,splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
| 366 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Tuple ) -> Optional[Any]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError) and the
    # body references the undefined originals (``dataset``,
    # ``expected_features``). Upstream this is
    # ``_check_parquet_dataset(dataset, expected_features)``.
    assert isinstance(lowercase__ , lowercase__ )
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (lowercase__ : str , lowercase__ : List[Any] , lowercase__ : Any ) -> List[str]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``keep_in_memory``, ``_check_parquet_dataset``).
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
    _check_parquet_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def lowerCAmelCase_ (lowercase__ : Any , lowercase__ : Union[str, Any] , lowercase__ : Optional[Any] ) -> Any:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``features``, ``default_expected_features``, ``_check_parquet_dataset``).
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase__ = features.copy() if features else default_expected_features
    lowerCAmelCase__ = (
        Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , features=lowercase__ , cache_dir=lowercase__ ).read()
    _check_parquet_dataset(lowercase__ , lowercase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : List[Any] ) -> Any:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``dataset``, ``split``, ``_check_parquet_dataset``).
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ , split=lowercase__ ).read()
    _check_parquet_dataset(lowercase__ , lowercase__ )
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize('''path_type''' , [str, list] )
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : Union[str, Any] , lowercase__ : str ) -> Any:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references
    # (``parquet_path``, ``tmp_path``, ``_check_parquet_dataset``).
    if issubclass(lowercase__ , lowercase__ ):
        lowerCAmelCase__ = parquet_path
    elif issubclass(lowercase__ , lowercase__ ):
        lowerCAmelCase__ = [parquet_path]
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
    _check_parquet_dataset(lowercase__ , lowercase__ )
def lowerCAmelCase_ (lowercase__ : List[str] , lowercase__ : str , lowercase__ : Optional[Any]=("train",) ) -> Union[str, Any]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed, undefined references (``splits``, ``dataset_dict``,
    # ``expected_features``). Upstream: ``_check_parquet_datasetdict``.
    assert isinstance(lowercase__ , lowercase__ )
    for split in splits:
        lowerCAmelCase__ = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('''keep_in_memory''' , [False, True] )
def lowerCAmelCase_ (lowercase__ : List[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Union[str, Any]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``keep_in_memory``, ``parquet_path``, ``_check_parquet_datasetdict``).
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowerCAmelCase__ = ParquetDatasetReader(
            {'''train''': parquet_path} , cache_dir=lowercase__ , keep_in_memory=lowercase__ ).read()
    _check_parquet_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize(
    '''features''' , [
        None,
        {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''},
        {'''col_1''': '''string''', '''col_2''': '''string''', '''col_3''': '''string'''},
        {'''col_1''': '''int32''', '''col_2''': '''int32''', '''col_3''': '''int32'''},
        {'''col_1''': '''float32''', '''col_2''': '''float32''', '''col_3''': '''float32'''},
    ] , )
def lowerCAmelCase_ (lowercase__ : int , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> List[str]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``features``, ``default_expected_features``, ``parquet_path``,
    # ``_check_parquet_datasetdict``).
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase__ = features.copy() if features else default_expected_features
    lowerCAmelCase__ = (
        Features({feature: Value(lowercase__ ) for feature, dtype in features.items()} ) if features is not None else None
    )
    lowerCAmelCase__ = ParquetDatasetReader({'''train''': parquet_path} , features=lowercase__ , cache_dir=lowercase__ ).read()
    _check_parquet_datasetdict(lowercase__ , lowercase__ )
@pytest.mark.parametrize('''split''' , [None, NamedSplit('''train''' ), '''train''', '''test'''] )
def lowerCAmelCase_ (lowercase__ : str , lowercase__ : Union[str, Any] , lowercase__ : Union[str, Any] ) -> int:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``split``,
    # ``parquet_path``, ``tmp_path``, ``path``, ``dataset``,
    # ``_check_parquet_datasetdict``).
    if split:
        lowerCAmelCase__ = {split: parquet_path}
    else:
        lowerCAmelCase__ = '''train'''
        lowerCAmelCase__ = {'''train''': parquet_path, '''test''': parquet_path}
    lowerCAmelCase__ = tmp_path / '''cache'''
    lowerCAmelCase__ = {'''col_1''': '''string''', '''col_2''': '''int64''', '''col_3''': '''float64'''}
    lowerCAmelCase__ = ParquetDatasetReader(lowercase__ , cache_dir=lowercase__ ).read()
    _check_parquet_datasetdict(lowercase__ , lowercase__ , splits=list(path.keys() ) )
    assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : Union[str, Any] ) -> List[Any]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references (``tmp_path``,
    # ``writer``, ``pf``, ``dataset``, ``output_table``).
    lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    lowerCAmelCase__ = pq.ParquetFile(tmp_path / '''foo.parquet''' )
    lowerCAmelCase__ = pf.read()
    assert dataset.data.table == output_table
def lowerCAmelCase_ (lowercase__ : Dict , lowercase__ : List[str] ) -> Optional[int]:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError), locals
    # collapsed to ``lowerCAmelCase__``, undefined references
    # (``shared_datadir``, ``image_path``, ``tmp_path``, ``writer``,
    # ``dataset``, ``reloaded_dataset``, ``reloaded_iterable_dataset``).
    lowerCAmelCase__ = str(shared_datadir / '''test_image_rgb.jpg''' )
    lowerCAmelCase__ = {'''image''': [image_path]}
    lowerCAmelCase__ = Features({'''image''': Image()} )
    lowerCAmelCase__ = Dataset.from_dict(lowercase__ , features=lowercase__ )
    lowerCAmelCase__ = ParquetDatasetWriter(lowercase__ , tmp_path / '''foo.parquet''' )
    assert writer.write() > 0
    lowerCAmelCase__ = Dataset.from_parquet(str(tmp_path / '''foo.parquet''' ) )
    assert dataset.features == reloaded_dataset.features
    lowerCAmelCase__ = ParquetDatasetReader(str(tmp_path / '''foo.parquet''' ) , streaming=lowercase__ ).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
    '''feature, expected''' , [
        (Features({'''foo''': Value('''int32''' )} ), None),
        (Features({'''image''': Image(), '''foo''': Value('''int32''' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({'''nested''': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ] , )
def lowerCAmelCase_ (lowercase__ : Optional[int] , lowercase__ : str ) -> Tuple:
    '''simple docstring'''
    # NOTE(review): broken — duplicate parameter names (SyntaxError) and the
    # body references the undefined parametrized name ``expected``.
    assert get_writer_batch_size(lowercase__ ) == expected
| 668 | 0 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A_ :
    """Frozen-free configuration holder with a deep-copy helper.

    NOTE(review): every field below was renamed to ``__snake_case`` by the
    generator, so only the last assignment survives as a (name-mangled) class
    attribute, and — since none carry annotations — the dataclass machinery
    sees no fields at all. The original field names are unrecoverable from
    this view; TODO restore them against the upstream source.
    """

    __snake_case = None
    __snake_case = False
    __snake_case = False
    __snake_case = False
    __snake_case = None
    __snake_case = None
    __snake_case = False
    __snake_case = False
    __snake_case = False
    __snake_case = True
    __snake_case = None
    __snake_case = 1
    __snake_case = None
    __snake_case = False
    __snake_case = None
    __snake_case = None

    def _snake_case ( self ):
        """Return a deep copy of this config (new instance, deep-copied state).

        Bug fixes: the comprehension deep-copied the undefined name ``a``
        instead of each value ``v``, and the ``self: List[str]`` annotation
        referenced ``List`` which this module never imports (a NameError at
        class-creation time).
        """
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
| 669 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuda kernels so the expected image slices in the
# tests below are reproducible across runs.
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    # NOTE(review): fast-test suite for ConsistencyModelPipeline, but broken
    # by machine renaming throughout: every class attribute is named
    # ``__snake_case`` (only the last assignment survives), every method is
    # named ``_snake_case`` (only the last survives as an attribute), several
    # signatures declare duplicate parameters named ``a`` (a SyntaxError), and
    # method bodies bind results to dead ``__lowerCamelCase`` locals and then
    # read the undefined originals (``unet``, ``components``, ``pipe``,
    # ``image``, ``image_slice``, ``expected_slice``, ...). Restore the
    # original names before attempting to run any of this.
    '''simple docstring'''
    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )
    @property
    def _snake_case ( self: str ):
        # presumably ``dummy_uncond_unet`` — returns undefined ``unet``.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _snake_case ( self: Tuple ):
        # presumably ``dummy_cond_unet`` — returns undefined ``unet``.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _snake_case ( self: int , a: str=False ):
        # presumably ``get_dummy_components(class_cond)`` — returns undefined
        # ``components``; the dict keys require locals ``unet``/``scheduler``.
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # presumably ``get_dummy_inputs(device, seed)`` — duplicate ``a``
        # parameters are a SyntaxError; returns undefined ``inputs``.
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    # NOTE(review): slow/GPU integration tests for ConsistencyModelPipeline,
    # broken by the same machine renaming as the fast suite: all methods are
    # named ``_snake_case`` (only the last definition survives), several
    # signatures declare duplicate parameters named ``a`` (a SyntaxError), and
    # bodies bind to dead ``__lowerCamelCase`` locals then read the undefined
    # originals (``inputs``, ``latents``, ``pipe``, ``image``, ...). Restore
    # the original names before running.
    '''simple docstring'''
    def _snake_case ( self: Any ):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # presumably ``get_inputs(seed, get_fixed_latents, device, dtype, shape)``.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs
    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # presumably ``get_fixed_latents(seed, device, dtype, shape)``.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _snake_case ( self: str ):
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    @require_torch_a
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    @require_torch_a
    def _snake_case ( self: Dict ):
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the map of released MarkupLM checkpoints to their
# hosted config files.
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    'microsoft/markuplm-base': 'https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json',
    'microsoft/markuplm-large': 'https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json',
}
class A_ ( __UpperCamelCase ):
    """Configuration class for MarkupLM models (``model_type`` = ``markuplm``).

    NOTE(review): the original ``__init__`` declared every parameter as ``a``
    (duplicate parameter names are a SyntaxError) and bound each value to a
    throwaway local instead of ``self``, so the config stored nothing.
    Parameter names are reconstructed from the body's assignment order and
    the defaults' positions; verify against the upstream MarkupLMConfig.
    """

    __snake_case = """markuplm"""

    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.0_2 , layer_norm_eps=1e-12 , pad_token_id=0 , bos_token_id=0 , eos_token_id=2 , max_xpath_tag_unit_embeddings=256 , max_xpath_subs_unit_embeddings=1024 , tag_pad_id=216 , subs_pad_id=1001 , xpath_unit_hidden_size=32 , max_depth=50 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        # Forward the special-token ids (and any extra kwargs) to the base config.
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # additional properties
        self.max_depth = max_depth
        self.max_xpath_tag_unit_embeddings = max_xpath_tag_unit_embeddings
        self.max_xpath_subs_unit_embeddings = max_xpath_subs_unit_embeddings
        self.tag_pad_id = tag_pad_id
        self.subs_pad_id = subs_pad_id
        self.xpath_unit_hidden_size = xpath_unit_hidden_size
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( PretrainedConfig ):
    """Configuration for the TrOCR decoder (a causal transformer decoder
    used on top of a vision encoder for text recognition)."""

    # Key under which this configuration is registered with the Auto classes.
    model_type = "trocr"
    # Generation outputs that should not be compared/validated.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map canonical attribute names onto the TrOCR-specific field names.
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std  # stddev for weight initialisation
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding  # multiply embeddings by sqrt(d_model) if True
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 1 |
def UpperCamelCase__ ( input_str, use_pascal = False ):
    """Convert a snake_case string to camelCase (or PascalCase).

    :param input_str: the snake_case string to convert.
    :param use_pascal: when True also capitalise the first word
        (PascalCase); otherwise the first word is kept as-is (camelCase).
    :return: the converted string.
    :raises ValueError: if ``input_str`` is not a string or ``use_pascal``
        is not a boolean.
    """
    if not isinstance(input_str, str):
        msg = f'Expected string as input, found {type(input_str)}'
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal)}'
        raise ValueError(msg)

    words = input_str.split('_')
    # camelCase keeps the first word untouched; PascalCase capitalises all.
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( ProcessorMixin ):
    """Processor bundling a CLIP image processor and an XLM-Roberta
    tokenizer into one callable (AltCLIP-style)."""

    # Attribute/class names consumed by the ProcessorMixin machinery.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy alias kept for backward compatibility.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize ``text`` and/or preprocess ``images``; at least one of
        the two must be provided."""
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Union of the two components' input names, order-preserving.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
# Module-level logger; referenced as `logger` by the feature extractor below.
logger = logging.get_logger(__name__)
class A_ ( SequenceFeatureExtractor ):
    """Feature extractor turning raw audio into (optionally fused) log-mel
    spectrogram features, as used by CLAP.

    Two mel filter banks are kept: an HTK-scaled one for the "fusion" path
    and a Slaney-scaled one for the plain/random-truncation path.
    """

    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        # Number of positive-frequency bins of a real FFT of this window size.
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        # HTK-scaled filters: used by the "fusion" truncation path.
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        # Slaney-scaled filters: used by the non-fusion paths.
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialise this instance, dropping the large (recomputable) mel
        filter banks from the output dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output

    def _np_extract_fbank_features(self, waveform: np.array, mel_filters: Optional[np.array] = None) -> np.ndarray:
        """Compute the (time, mel) log-mel spectrogram of one waveform."""
        log_mel_spectrogram = spectrogram(
            waveform,
            window_function(self.fft_window_size, "hann"),
            frame_length=self.fft_window_size,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=mel_filters,
            log_mel="dB",
        )
        return log_mel_spectrogram.T

    def _random_mel_fusion(self, mel, total_frames, chunk_frames):
        """Build the 4-channel "fusion" feature: a bilinear shrink of the
        whole mel plus three randomly-located chunks (front/middle/back)."""
        ranges = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
        if len(ranges[1]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2]) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose index for each part
        idx_front = np.random.choice(ranges[0])
        idx_middle = np.random.choice(ranges[1])
        idx_back = np.random.choice(ranges[2])

        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]

        mel = torch.tensor(mel[None, None, :])
        mel_shrink = torch.nn.functional.interpolate(
            mel, size=[chunk_frames, 64], mode="bilinear", align_corners=False
        )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
        return mel_fusion

    def _get_input_mel(self, waveform: np.array, max_length, truncation, padding):
        """Truncate/pad one waveform to ``max_length`` samples and return
        its mel features plus a flag telling whether it was longer."""
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform) - max_length
                idx = np.random.randint(0, overflow + 1)
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                chunk_frames = max_length // self.hop_length + 1  # the +1 related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel], axis=0)
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel, total_frames, chunk_frames)
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented')
        else:
            longer = False
            # only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat + 1))[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform))
                    waveform = np.stack(np.tile(waveform, n_repeat))
                waveform = np.pad(waveform, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)

            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters)
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
            else:
                input_mel = self._np_extract_fbank_features(waveform, self.mel_filters_slaney)[None, :]

        return input_mel, longer

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: str = None,
        padding: Optional[str] = None,
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Featurize one waveform or a batch of waveforms.

        Returns a ``BatchFeature`` with ``input_features`` and ``is_longer``.
        """
        truncation = truncation if truncation is not None else self.truncation
        padding = padding if padding else self.padding

        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
                    f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
                    f' was sampled with {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f'Only mono-channel audio is supported for input to {self}')
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float64) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float64)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float64)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray(raw_speech)]

        # convert to mel spectrogram, truncate and pad if needed.
        padded_inputs = [
            self._get_input_mel(waveform, max_length if max_length else self.nb_max_samples, truncation, padding)
            for waveform in raw_speech
        ]

        input_mel = []
        is_longer = []
        for mel, longer in padded_inputs:
            input_mel.append(mel)
            is_longer.append(longer)

        if truncation == "fusion" and sum(is_longer) == 0:
            # if no audio is longer than 10s, then randomly select one audio to be longer
            rand_idx = np.random.randint(0, len(input_mel))
            is_longer[rand_idx] = True

        if isinstance(input_mel[0], list):
            input_mel = [np.asarray(feature, dtype=np.float64) for feature in input_mel]

        # is_longer is a list of bool
        is_longer = [[longer] for longer in is_longer]

        input_features = {"input_features": input_mel, "is_longer": is_longer}
        input_features = BatchFeature(input_features)

        if return_tensors is not None:
            input_features = input_features.convert_to_tensors(return_tensors)

        return input_features
| 669 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """Fast (tiny-model) checks for the unconditional LDMPipeline."""

    @property
    def dummy_uncond_unet(self):
        """A tiny UNet so the test runs in seconds."""
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=('DownBlock2D', 'AttnDownBlock2D'),
            up_block_types=('AttnUpBlock2D', 'UpBlock2D'),
        )
        return model

    @property
    def dummy_vq_model(self):
        """A tiny VQ-VAE matching the dummy UNet."""
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        """A tiny CLIP text encoder (unused by the unconditional test)."""
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        # Same seed twice so dict-style and tuple-style outputs must agree.
        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type='numpy').images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            generator=generator, num_inference_steps=2, output_type='numpy', return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # mps accumulates larger numerical error.
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the released CompVis checkpoint."""

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type='numpy').images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # mps accumulates larger numerical error.
        tolerance = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 669 | 1 |
def actual_power(a: int, b: int):
    """Compute a**b for b >= 0 by recursive squaring.

    The half-power is computed once and squared, giving O(log b)
    multiplications instead of recursing twice per level.
    """
    if b == 0:
        return 1
    half = actual_power(a, int(b / 2))
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer b; a negative exponent yields 1/a**|b|.

    >>> power(2, 3)
    8
    >>> power(-2, -3)
    -0.125
    """
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
| 669 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Usage string shown when the script is invoked with the wrong arguments.
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

# Weighted alive/dead pool (~9% alive), shuffled once at import time.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size: int) -> list[list[bool]]:
    """Return a ``size`` x ``size`` grid with every cell dead (False)."""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas: list[list[bool]]) -> None:
    """Randomise the canvas in place: each cell becomes alive/dead with
    probability 1/2 (one random bit per cell)."""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Advance the game by one generation and return the new canvas.

    Each cell is judged from its 3x3 neighbourhood (numpy slicing clips at
    the grid edges, so border cells simply see fewer neighbours).
    """
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas
def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """Apply Conway's rules to one cell.

    ``neighbours`` is the 3x3 (or edge-clipped) window centred on the cell,
    so the focus cell itself is counted once and removed below.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False  # underpopulation
        elif alive == 2 or alive == 3:
            state = True  # survival
        elif alive > 3:
            state = False  # overpopulation
    else:
        if alive == 3:
            state = True  # reproduction
    return state
if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(['w', 'k'])
    try:
        # Animate generations until the user interrupts with Ctrl-C.
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( PretrainedConfig ):
    """Configuration for the PEGASUS encoder-decoder model."""

    # Key under which this configuration is registered with the Auto classes.
    model_type = "pegasus"
    # Generation outputs that should not be compared/validated.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Map canonical attribute names onto the PEGASUS-specific field names.
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        # Canonical alias expected by shared model utilities.
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        # Canonical alias expected by shared model utilities.
        return self.d_model
| 669 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding granularities supported for MGP-STR model outputs."""

    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( ProcessorMixin ):
    """MGP-STR processor: wraps a ViT image processor plus character-,
    BPE- and wordpiece-level tokenizers for scene-text recognition, and
    decodes the model's three prediction heads."""

    # Attribute/class names consumed by the ProcessorMixin machinery.
    attributes = ["image_processor", "char_tokenizer"]
    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Legacy alias kept for backward compatibility.
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        self.char_tokenizer = tokenizer
        # Fixed auxiliary tokenizers for the BPE and wordpiece heads.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Preprocess ``images`` and/or tokenize ``text``; at least one of
        the two must be provided."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')

        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, sequences):
        """Decode the three head logits and keep, per sample, the decoding
        with the highest confidence among char/bpe/wordpiece."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)

        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')

        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])

        out = {}
        out["generated_text"] = final_strs
        out["scores"] = final_scores
        out["char_preds"] = char_strs
        out["bpe_preds"] = bpe_strs
        out["wp_preds"] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Greedy-decode one head's logits into strings plus cumulative
        softmax confidence scores (truncated at that head's EOS token)."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '[SEP]'
        else:
            raise ValueError(f'Format {format} is not supported.')

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        # Drop the first position (start token) before decoding.
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Decode with the character tokenizer, stripping spacer spaces."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """Decode with the GPT-2 BPE tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Decode with the BERT wordpiece tokenizer, stripping spacer spaces."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 669 | 1 |
import warnings
from functools import wraps
from typing import Callable
def UpperCamelCase__ ( fn: Callable ) -> Callable:
    """Decorator flagging ``fn`` as experimental: every call emits a
    UserWarning before delegating to the wrapped callable.

    :param fn: the callable to wrap.
    :return: a wrapper with ``fn``'s metadata (via ``functools.wraps``).
    """

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.'),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()

# Module-level logger; referenced as `logger` by the conversion function.
logger = logging.get_logger(__name__)

# Map "XxxTokenizer" -> XxxTokenizerFast class for every convertible tokenizer.
TOKENIZER_CLASSES = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints to the fast (tokenizers-backed)
    format and keep only the resulting ``tokenizer.json`` files.

    :param tokenizer_name: tokenizer class name, or None for all of them.
    :param checkpoint_name: checkpoint to convert, or None for all canonical ones.
    :param dump_path: output directory for the generated files.
    :param force_download: re-download checkpoints even if cached.
    :raises ValueError: if ``tokenizer_name`` is not a known tokenizer class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.')

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}

    logger.info(f'Loading tokenizer classes: {tokenizer_names}')

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            # Convert every canonical checkpoint for this tokenizer class.
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}')

        for checkpoint in checkpoint_names:
            logger.info(f'Loading {tokenizer_class.__class__.__name__} {checkpoint}')

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}')

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    # Vocab files live in a sub-directory: save there without a prefix.
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}')

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f'=> File names {file_names}')

            # Keep only the fast-format tokenizer.json files.
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(f'=> removing {file_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
    )
    parser.add_argument(
        '--tokenizer_name',
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            'download and convert all the checkpoints from AWS.'
        ),
    )
    parser.add_argument(
        '--checkpoint_name',
        default=None,
        type=str,
        help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
    )
    parser.add_argument(
        '--force_download',
        action='store_true',
        help='Re-download checkpoints.',
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=DummyObject ):
    """Import-time placeholder raising an informative error when used
    without the `speech` backend installed.

    NOTE(review): the original public class name is not recoverable from
    this view; `A_` is kept to avoid breaking external references.
    """

    # Read by the DummyObject metaclass to build the error message.
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
class A_ ( metaclass=DummyObject ):
    """Second import-time placeholder for a `speech`-backend class.

    NOTE(review): the original public class name is not recoverable from
    this view; `A_` is kept (it shadows the previous definition — confirm
    the intended names against the real dummy_speech_objects module).
    """

    # Read by the DummyObject metaclass to build the error message.
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['speech'])
| 669 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Path of the small SentencePiece fixture model (no BOS token) used as the
# tokenizer vocabulary in the tests below.
lowercase_ = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for google/pegasus-large (slow and fast tokenizers).

    NOTE(review): the four ``__snake_case`` class attributes below all share
    one name, so only the last binding survives; they look like the usual
    ``tokenizer_class`` / ``rust_tokenizer_class`` / ``test_rust_tokenizer`` /
    ``test_sentencepiece`` attributes expected by TokenizerTesterMixin —
    confirm and restore the original names.  Several methods also reference
    names (``a``, ``tokenizer``, locals read via lost bindings) that are never
    defined; they are flagged inline.
    """
    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True
    # setUp: save the fixture tokenizer into the mixin's temp directory.
    def _snake_case ( self: List[str] ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): ``a`` is undefined here (presumably the fixture path
        # ``lowercase_`` above), and the result is never bound to the
        # ``tokenizer`` name used on the next line — restore both.
        __lowerCamelCase : List[str] = PegasusTokenizer(a )
        tokenizer.save_pretrained(self.tmpdirname )
    # Cached full-size tokenizer (downloaded from the Hub on first access).
    @cached_property
    def _snake_case ( self: List[Any] ):
        return PegasusTokenizer.from_pretrained('google/pegasus-large' )
    # Build a tokenizer from the fixture saved in setUp.
    def _snake_case ( self: Tuple , **a: List[Any] ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
    # Input/output text pair used by the common tokenizer tests.
    def _snake_case ( self: List[Any] , a: int ):
        return ("This is a test", "This is a test")
    def _snake_case ( self: Any ):
        # ``</s>`` is expected to round-trip to id 1.
        # NOTE(review): the token/id locals were bound to ``__lowerCamelCase``
        # but the assertions read ``a`` — the original names were lost.
        __lowerCamelCase : Dict = '</s>'
        __lowerCamelCase : List[str] = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(a ) , a )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(a ) , a )
    def _snake_case ( self: Optional[Any] ):
        # Sanity-check the ordered vocabulary: specials first, 1103 entries.
        __lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<pad>' )
        self.assertEqual(vocab_keys[1] , '</s>' )
        self.assertEqual(vocab_keys[-1] , 'v' )
        self.assertEqual(len(a ) , 1103 )
    def _snake_case ( self: Tuple ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def _snake_case ( self: Dict ):
        # Slow and fast tokenizers must produce identical ids for input that is
        # heavy on special/unknown tokens.
        __lowerCamelCase : Any = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important'
            ' </s> <pad> <pad> <pad>'
        )
        __lowerCamelCase : Optional[Any] = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )
    def _snake_case ( self: int ):
        # Pinned ids for mask tokens on the full-size tokenizer.
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        __lowerCamelCase : Tuple = '<mask_1> To ensure a <mask_2> flow of bank resolutions.'
        __lowerCamelCase : Optional[Any] = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : Optional[Any] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
    def _snake_case ( self: Dict ):
        # Pinned vocabulary/special-token properties of google/pegasus-large.
        __lowerCamelCase : Any = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        __lowerCamelCase : int = 'To ensure a smooth flow of bank resolutions.'
        __lowerCamelCase : Union[str, Any] = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        __lowerCamelCase : List[str] = tokenizer([raw_input_str] , return_tensors=a ).input_ids[0]
        self.assertListEqual(a , a )
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
    @require_torch
    def _snake_case ( self: str ):
        # Truncation/padding shapes for a long source batch and a short target
        # batch (model_max_length is 1024 for pegasus-large).
        __lowerCamelCase : List[str] = ['This is going to be way too long.' * 150, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : Union[str, Any] = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : List[str] = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.
    @slow
    def _snake_case ( self: List[str] ):
        # Full integration check against pinned encodings for a fixed revision.
        # fmt: off
        __lowerCamelCase : Tuple = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=a , model_name='google/bigbird-pegasus-large-arxiv' , revision='ba85d0851d708441f91440d509690f1ab6353415' , )
@require_sentencepiece
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Tokenizer tests for the BigBird-Pegasus variant (offset=0, plain
    ``[MASK]`` token, no sentence-mask token).

    NOTE(review): as in the class above, the ``__snake_case`` attributes share
    one name (only the last survives), and several methods reference names
    (``a``, ``tokenizer``, lost locals) that are never defined — flagged
    inline.
    """
    __snake_case = PegasusTokenizer
    __snake_case = PegasusTokenizerFast
    __snake_case = True
    __snake_case = True
    # setUp: save a fixture tokenizer configured like bigbird-pegasus.
    def _snake_case ( self: str ):
        super().setUp()
        # We have a SentencePiece fixture for testing
        # NOTE(review): both ``a`` arguments are undefined (the first should be
        # the fixture path ``lowercase_``; ``mask_token_sent`` is presumably
        # None here), and the result is never bound to ``tokenizer``.
        __lowerCamelCase : str = PegasusTokenizer(a , offset=0 , mask_token_sent=a , mask_token='[MASK]' )
        tokenizer.save_pretrained(self.tmpdirname )
    # Cached full-size tokenizer (downloaded from the Hub on first access).
    @cached_property
    def _snake_case ( self: List[str] ):
        return PegasusTokenizer.from_pretrained('google/bigbird-pegasus-large-arxiv' )
    # Build a tokenizer from the fixture saved in setUp.
    def _snake_case ( self: Union[str, Any] , **a: Dict ):
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **a )
    # Input/output text pair used by the common tokenizer tests.
    def _snake_case ( self: List[str] , a: Any ):
        return ("This is a test", "This is a test")
    def _snake_case ( self: Any ):
        # Slow and fast tokenizers must agree on special-token-heavy input.
        __lowerCamelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname )
        __lowerCamelCase : Tuple = (
            'Let\'s see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>'
            ' <pad> <pad> <pad>'
        )
        __lowerCamelCase : int = rust_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        __lowerCamelCase : str = py_tokenizer([raw_input_str] , return_tensors=a , add_special_tokens=a ).input_ids[0]
        self.assertListEqual(a , a )
    @require_torch
    def _snake_case ( self: Union[str, Any] ):
        # Truncation/padding shapes: this variant has model_max_length 4096.
        __lowerCamelCase : Union[str, Any] = ['This is going to be way too long.' * 1000, 'short example']
        __lowerCamelCase : Tuple = ['not super long but more than 5 tokens', 'tiny']
        __lowerCamelCase : str = self._large_tokenizer(a , padding=a , truncation=a , return_tensors='pt' )
        __lowerCamelCase : Any = self._large_tokenizer(
            text_target=a , max_length=5 , padding=a , truncation=a , return_tensors='pt' )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(a ) == 2 # input_ids, attention_mask.
    def _snake_case ( self: Any ):
        # Pinned ids comparing the HF tokenizer to the original TF output.
        __lowerCamelCase : int = (
            'This is an example string that is used to test the original TF implementation against the HF'
            ' implementation'
        )
        __lowerCamelCase : Dict = self._large_tokenizer(a ).input_ids
        self.assertListEqual(
            a , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1] , )
| 669 | 1 |
# Re-export the VQ-Diffusion pipeline only when its runtime dependencies
# (transformers and torch) are installed, so importing this package never
# fails on a minimal install.
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 669 |
def UpperCamelCase__(density, bulk_modulus):
    """Return the speed of sound in a fluid, ``sqrt(bulk_modulus / density)``.

    Fix: the original signature declared two parameters both named
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) while the body read the unbound
    names ``density`` and ``bulk_modulus``; the parameters are restored from
    those body references.

    Args:
        density: fluid density (must be > 0).
        bulk_modulus: fluid bulk modulus (must be > 0).

    Raises:
        ValueError: if either quantity is non-positive.

    >>> UpperCamelCase__(1, 1)
    1.0
    >>> UpperCamelCase__(4, 16)
    2.0
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    # Newton–Laplace equation: c = sqrt(K / rho).
    return (bulk_modulus / density) ** 0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# NOTE(review): both module-level names below are bound to ``lowercase_``, so
# the second assignment clobbers the first.  They are presumably the module
# ``logger`` and the RoBERTa pretrained-config archive map — confirm and
# restore the original names.
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
    'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
    'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
    'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
    'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
    'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
    'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class A_ ( __UpperCamelCase ):
    """Configuration class for a RoBERTa-style model.

    Fixes: the original ``__init__`` declared every parameter with the same
    name ``a`` (a SyntaxError), and every assignment was bound to the local
    ``__lowerCamelCase`` instead of the ``self.*`` attribute.  Parameter names
    are restored from the right-hand sides of the original assignments; the
    defaults are unchanged.
    """
    __snake_case = """roberta"""

    def __init__(
        self,
        vocab_size=5_0265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are forwarded to the base config.
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration for RoBERTa-style models.

    Fix: the axis dictionaries were bound to ``__lowerCamelCase`` while the
    return statement read the unbound name ``dynamic_axis`` (NameError on
    first property access); the local name is restored from that reference.
    """
    @property
    def _snake_case ( self: int ):
        # The multiple-choice task adds a per-example 'choice' axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        # Both model inputs share the same dynamic-axis layout.
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 |
def UpperCamelCase__(n):
    """Return the number of positive divisors of ``n`` via prime factorisation.

    Uses d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).  Fix: the
    parameter was unnamed-able (the body read the unbound names ``n``,
    ``n_divisors``, ``multiplicity``, ``i`` whose bindings were lost); locals
    are restored from those references.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # A single prime factor greater than sqrt(n) remains.
        n_divisors *= 2
    return n_divisors


def solution():
    """Project Euler 12: first triangle number with more than 500 divisors.

    Fix: this function and the divisor counter originally shared one name
    (the second definition shadowed the first), and the loop called the
    undefined names ``count_divisors`` and ``SCREAMING_SNAKE_CASE__``; it now
    calls the counter above with the current triangle number, and the name
    matches the ``print(solution())`` entry point below.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if UpperCamelCase__(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 669 | 1 |
def UpperCamelCase__(n):
    """Return the number of positive divisors of ``n`` via prime factorisation.

    Uses d(p1^a1 * ... * pk^ak) = (a1 + 1) * ... * (ak + 1).  Fix: the body
    read unbound names (``n_divisors``, ``multiplicity``, ``i``) whose
    bindings were destroyed; locals are restored from those references.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    if n > 1:
        # A single prime factor greater than sqrt(n) remains.
        n_divisors *= 2
    return n_divisors


def solution():
    """Project Euler 12: first triangle number with more than 500 divisors.

    Fix: this definition originally shadowed the divisor counter (both were
    named ``UpperCamelCase__``) and called the undefined names
    ``count_divisors`` and ``SCREAMING_SNAKE_CASE__``; it now calls the
    counter above and matches the ``print(solution())`` entry point below.
    """
    i = 1
    t_num = 1
    while True:
        i += 1
        t_num += i
        if UpperCamelCase__(t_num) > 500:
            break
    return t_num


if __name__ == "__main__":
    print(solution())
| 669 |
import numpy as np
class A_ :
    """A single grid cell for the A* demo: position, parent link, and costs.

    Fixes: ``__init__`` bound its values to the local ``__lowerCamelCase``
    instead of the ``self.position`` / ``self.parent`` / ``self.g`` /
    ``self.h`` / ``self.f`` attributes the rest of this module reads, and
    ``__eq__`` compared against the unbound name ``cell`` instead of its
    parameter.
    """

    def __init__(self):
        self.position = (0, 0)  # (x, y) grid coordinates
        self.parent = None      # back-pointer used to reconstruct the path
        self.g = 0              # cost from the start cell
        self.h = 0              # heuristic estimate to the goal
        self.f = 0              # total score: g + h

    def __eq__(self, other):
        # Two cells denote the same node when they share coordinates.
        return self.position == other.position

    def _snake_case(self):
        # Debug helper: print this cell's coordinates.
        print(self.position)
class A_ :
    """A rectangular, obstacle-free grid world for the A* demo.

    Fixes: ``__init__`` bound its values to the local ``__lowerCamelCase``
    instead of ``self.w`` / ``self.world_x_limit`` / ``self.world_y_limit``
    (read below and by the visualisation code), and the neighbour method was
    also named ``_snake_case`` (shadowed by the print helper) while the search
    function calls ``world.get_neigbours(...)``; the lost local bindings
    (``neughbour_cord``, ``current_x``, ``current_y``, ``neighbours``, ``x``,
    ``y``) are restored from the names the body reads.
    """

    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)        # occupancy grid used for display
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]

    def _snake_case(self):
        # Debug helper: print the raw grid.
        print(self.w)

    def get_neigbours(self, cell):
        """Return the in-bounds 8-connected neighbours of ``cell``, each with
        its parent set to ``cell``."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                # Create a sibling cell of the same type as the input; this
                # avoids depending on a class name that is shadowed at module
                # level in this file.
                neighbour = type(cell)()
                neighbour.position = (x, y)
                neighbour.parent = cell
                neighbours.append(neighbour)
        return neighbours
def UpperCamelCase__(world, start, goal):
    """A* search from ``start`` to ``goal`` on ``world``.

    Returns the list of positions from start to goal (inclusive), following
    parent links back from the goal.

    Fixes: the lost local bindings (``_open``, ``_closed``, ``min_f``,
    ``current``, ``path`` and the coordinate pairs) are restored from the
    names the body reads; the heuristic originally degenerated to
    ``(ya - ya)**2 + (xa - xa)**2 == 0`` because both coordinate pairs were
    unpacked into the same names; and the closed/open membership loops were
    no-ops (``continue`` inside an inner ``for`` skipped nothing) — they are
    now real membership tests.
    """
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        # Expand the open node with the smallest f-score.
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            # Skip nodes that were already expanded.
            if any(c == n for c in _closed):
                continue
            n.g = current.g + 1
            xa, ya = n.position
            xb, yb = goal.position
            # Squared-Euclidean-distance heuristic to the goal.
            n.h = (yb - ya) ** 2 + (xb - xa) ** 2
            n.f = n.h + n.g
            # Skip if an equal-or-better copy is already queued.
            if any(c == n and c.f < n.f for c in _open):
                continue
            _open.append(n)
    # Walk parent pointers back from the goal to rebuild the path.
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    # Demo: run A* on a 5x5 grid from (0, 0) to (4, 4) and print the grid with
    # the found path marked with 1s.
    # NOTE(review): the names `Gridworld`, `Cell`, `astar`, `world`, `start`,
    # `goal` and `s` used below are never defined in this module as written —
    # the classes above are both named `A_` (the second shadows the first),
    # the search function is `UpperCamelCase__`, and every result here is
    # bound to `lowercase_`. The original bindings need restoring for this
    # demo to run.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 1 |
def UpperCamelCase__(base, exponent):
    """Return ``base ** exponent`` for a non-negative integer exponent,
    computed by recursion: power(b, 0) == 1, power(b, n) == b * power(b, n-1).

    Fixes: the original signature declared both parameters as
    ``SCREAMING_SNAKE_CASE__`` (a SyntaxError), the body read the unbound
    names ``base`` and ``exponent``, and the recursive call targeted the
    undefined name ``power``; parameter names are restored from the body
    references and the recursion now targets this function.
    """
    return base * UpperCamelCase__(base, (exponent - 1)) if exponent else 1


if __name__ == "__main__":
    print('Raise base to the power of exponent using recursion...')
    base = int(input('Enter the base: ').strip())
    exponent = int(input('Enter the exponent: ').strip())
    result = UpperCamelCase__(base, abs(exponent))
    if exponent < 0:  # power() does not properly deal w/ negative exponents
        result = 1 / result
    print(F"""{base} to the power of {exponent} is {result}""")
| 669 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__(year):
    """Return the date of Easter for ``year`` using Gauss's Easter algorithm.

    Fixes: every intermediate value was bound to ``__lowerCamelCase`` while
    later lines read the real names (``metonic_cycle``, ``leap_day_inhibits``,
    ...), and the demo below called the undefined name ``gauss_easter``; the
    local bindings are restored from those right-hand-side references.
    """
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    # The two exceptional cases of the Gauss algorithm.
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))


if __name__ == "__main__":
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        tense = 'will be' if year > datetime.now().year else 'was'
        print(F"""Easter in {year} {tense} {UpperCamelCase__(year)}""")
| 669 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
    """Builds tiny SwinV2 configs and dummy inputs for the model tests below.

    NOTE(review): this class is heavily corrupted — every ``__init__``
    parameter is named ``a`` (duplicate argument names are a SyntaxError),
    and each ``__lowerCamelCase : T = value`` line binds a throwaway local
    instead of the ``self.*`` attribute the other methods read.  The intended
    attribute names are visible on the right-hand sides and in ``get_config``
    (``batch_size``, ``image_size``, ``embed_dim``, ...) and should be
    restored.
    """
    def __init__( self: Optional[int] , a: Optional[int] , a: Tuple=13 , a: str=32 , a: int=2 , a: List[str]=3 , a: int=16 , a: List[str]=[1, 2, 1] , a: Any=[2, 2, 4] , a: Optional[int]=2 , a: Tuple=2.0 , a: Optional[int]=True , a: Dict=0.0 , a: int=0.0 , a: List[str]=0.1 , a: List[Any]="gelu" , a: List[str]=False , a: Tuple=True , a: Optional[int]=0.0_2 , a: Any=1e-5 , a: int=True , a: Dict=None , a: List[Any]=True , a: List[Any]=10 , a: int=8 , ):
        __lowerCamelCase : List[Any] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Tuple = image_size
        __lowerCamelCase : Optional[int] = patch_size
        __lowerCamelCase : Tuple = num_channels
        __lowerCamelCase : Tuple = embed_dim
        __lowerCamelCase : Optional[Any] = depths
        __lowerCamelCase : Any = num_heads
        __lowerCamelCase : List[Any] = window_size
        __lowerCamelCase : Dict = mlp_ratio
        __lowerCamelCase : str = qkv_bias
        __lowerCamelCase : int = hidden_dropout_prob
        __lowerCamelCase : List[str] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = drop_path_rate
        __lowerCamelCase : Any = hidden_act
        __lowerCamelCase : str = use_absolute_embeddings
        __lowerCamelCase : Optional[Any] = patch_norm
        __lowerCamelCase : str = layer_norm_eps
        __lowerCamelCase : str = initializer_range
        __lowerCamelCase : Dict = is_training
        __lowerCamelCase : Tuple = scope
        __lowerCamelCase : List[Any] = use_labels
        __lowerCamelCase : Any = type_sequence_label_size
        __lowerCamelCase : List[str] = encoder_stride
    # Random pixel values (and optional labels) plus a fresh tiny config.
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : List[str] = None
        if self.use_labels:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        __lowerCamelCase : List[str] = self.get_config()
        return config, pixel_values, labels
    # Build a SwinvaConfig from the tester's hyper-parameters.
    def _snake_case ( self: Any ):
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    # Forward the base model and check the last hidden state's shape.
    def _snake_case ( self: Tuple , a: int , a: List[Any] , a: Optional[int] ):
        __lowerCamelCase : Any = SwinvaModel(config=a )
        model.to(a )
        model.eval()
        __lowerCamelCase : Union[str, Any] = model(a )
        __lowerCamelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        __lowerCamelCase : Optional[int] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    # Masked-image-modeling head: logits reconstruct the full image, and the
    # model must also work on single-channel (greyscale) input.
    def _snake_case ( self: int , a: Tuple , a: Optional[Any] , a: Dict ):
        __lowerCamelCase : int = SwinvaForMaskedImageModeling(config=a )
        model.to(a )
        model.eval()
        __lowerCamelCase : int = model(a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        __lowerCamelCase : Union[str, Any] = 1
        __lowerCamelCase : str = SwinvaForMaskedImageModeling(a )
        model.to(a )
        model.eval()
        __lowerCamelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        __lowerCamelCase : Tuple = model(a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    # Image-classification head: logits over type_sequence_label_size classes.
    def _snake_case ( self: List[Any] , a: List[Any] , a: List[Any] , a: Union[str, Any] ):
        __lowerCamelCase : Tuple = self.type_sequence_label_size
        __lowerCamelCase : Optional[Any] = SwinvaForImageClassification(a )
        model.to(a )
        model.eval()
        __lowerCamelCase : Optional[int] = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    # Repackage config + inputs into the dict format the common tests expect.
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Optional[int] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = config_and_inputs
        __lowerCamelCase : Optional[Any] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """Common model tests for SwinV2 (base, image classification, masked image
    modeling), run through the shared ModelTesterMixin machinery.

    NOTE(review): as elsewhere in this file, the ``__snake_case`` class
    attributes all share one name (only the last survives — originally
    ``all_model_classes`` / ``pipeline_model_mapping`` / ``fx_compatible`` /
    ``test_pruning`` / ``test_resize_embeddings`` / ``test_head_masking`` or
    similar), several signatures declare duplicate ``a`` parameters (a
    SyntaxError), and many ``__lowerCamelCase`` bindings shadow the real
    local names that later lines read (``model``, ``config``, ``outputs``,
    ...).  ``SwinvaModelTester`` in ``setUp`` is also undefined here because
    the tester class above is named ``A_``.
    """
    __snake_case = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    __snake_case = (
        {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    __snake_case = False
    # Create the model tester and a ConfigTester for SwinvaConfig.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase : Tuple = SwinvaModelTester(self )
        __lowerCamelCase : Dict = ConfigTester(self , config_class=a , embed_dim=37 )
    # Run the standard config round-trip/validation suite.
    def _snake_case ( self: Optional[int] ):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def _snake_case ( self: str ):
        __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )
    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def _snake_case ( self: Dict ):
        pass
    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def _snake_case ( self: str ):
        pass
    # Input embeddings exist; output embeddings are absent or a Linear head.
    def _snake_case ( self: Tuple ):
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Optional[int] = model_class(a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            __lowerCamelCase : Dict = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a , nn.Linear ) )
    # The forward signature must start with 'pixel_values'.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Optional[Any] = model_class(a )
            __lowerCamelCase : Tuple = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase : Any = [*signature.parameters.keys()]
            __lowerCamelCase : Optional[int] = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )
    # Attention outputs: count, window-squared shape, and presence when
    # requested via kwargs or via the config.
    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : Any = True
        for model_class in self.all_model_classes:
            __lowerCamelCase : Dict = True
            __lowerCamelCase : str = False
            __lowerCamelCase : Dict = True
            __lowerCamelCase : Tuple = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                __lowerCamelCase : int = model(**self._prepare_for_class(a , a ) )
            __lowerCamelCase : Optional[int] = outputs.attentions
            __lowerCamelCase : Tuple = len(self.model_tester.depths )
            self.assertEqual(len(a ) , a )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            __lowerCamelCase : Optional[Any] = True
            __lowerCamelCase : Optional[int] = config.window_size**2
            __lowerCamelCase : Optional[Any] = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                __lowerCamelCase : Tuple = model(**self._prepare_for_class(a , a ) )
            __lowerCamelCase : Optional[int] = outputs.attentions
            self.assertEqual(len(a ) , a )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            __lowerCamelCase : int = len(a )
            # Check attention is always last and order is fine
            __lowerCamelCase : int = True
            __lowerCamelCase : List[str] = True
            __lowerCamelCase : List[Any] = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                __lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(a , a ) )
            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                __lowerCamelCase : List[str] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                __lowerCamelCase : Tuple = 2
            self.assertEqual(out_len + added_hidden_states , len(a ) )
            __lowerCamelCase : Optional[Any] = outputs.attentions
            self.assertEqual(len(a ) , a )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
    # Shared checker: hidden_states count/shape and the reshaped variant.
    def _snake_case ( self: Dict , a: Union[str, Any] , a: Union[str, Any] , a: Union[str, Any] , a: str ):
        __lowerCamelCase : List[Any] = model_class(a )
        model.to(a )
        model.eval()
        with torch.no_grad():
            __lowerCamelCase : Optional[int] = model(**self._prepare_for_class(a , a ) )
        __lowerCamelCase : List[Any] = outputs.hidden_states
        __lowerCamelCase : List[str] = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(a ) , a )
        # Swinv2 has a different seq_length
        __lowerCamelCase : Tuple = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        __lowerCamelCase : Optional[Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(a ) , a )
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Union[str, Any] = reshaped_hidden_states[0].shape
        __lowerCamelCase : Dict = (
            reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    # hidden_states requested via kwargs and via config.
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : List[Any] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = True
            self.check_hidden_states_output(a , a , a , a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase : Union[str, Any] = True
            self.check_hidden_states_output(a , a , a , a )
    # Same as above, with image sizes padded up to a patch-size multiple.
    def _snake_case ( self: Union[str, Any] ):
        __lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : Union[str, Any] = 3
        __lowerCamelCase : Optional[int] = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        __lowerCamelCase : Union[str, Any] = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        __lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        __lowerCamelCase : List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase : Dict = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
    def _snake_case ( self: Dict ):
        __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a )
    def _snake_case ( self: int ):
        __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )
    # The released checkpoints must load without error.
    @slow
    def _snake_case ( self: Union[str, Any] ):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : List[Any] = SwinvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
    # With zeroed initializer ranges, non-embedding params must init to 0/1.
    def _snake_case ( self: List[Any] ):
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        __lowerCamelCase : Optional[Any] = _config_zero_init(a )
        for model_class in self.all_model_classes:
            __lowerCamelCase : Any = model_class(config=a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class A_ ( unittest.TestCase ):
    """Integration test: run the released swinv2-tiny classifier on the COCO
    cats image and compare logits against pinned values.

    NOTE(review): the ``.to(a )`` / ``model(**a )`` / ``image_processor(
    images=a , ...)`` calls below reference the undefined name ``a`` —
    presumably ``torch_device``, the processed ``inputs`` and the PIL
    ``image`` respectively — and several results bound to
    ``__lowerCamelCase`` are read back under their original names
    (``inputs``, ``outputs``); the original bindings need restoring.
    """
    @cached_property
    def _snake_case ( self: Any ):
        # Image processor for the checkpoint, or None without vision deps.
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )
    @slow
    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : List[str] = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            a )
        __lowerCamelCase : Optional[int] = self.default_image_processor
        __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        __lowerCamelCase : Union[str, Any] = image_processor(images=a , return_tensors='pt' ).to(a )
        # forward pass
        with torch.no_grad():
            __lowerCamelCase : int = model(**a )
        # verify the logits
        __lowerCamelCase : Union[str, Any] = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , a )
        __lowerCamelCase : List[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 ) )
| 669 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving SDE scheduler (score-based generative modelling).

    Fixes over the previous revision:
    * ``__init__`` declared every parameter as ``a`` (duplicate argument —
      a SyntaxError) and bound the initial ``None`` state to locals instead
      of ``self``, so ``self.timesteps`` never existed;
    * ``set_timesteps`` stored its schedule in a local, so ``step_pred``
      always raised "`self.timesteps` is not set";
    * both methods were named ``_snake_case`` (the second shadowed the
      first) — restored to ``set_timesteps`` / ``step_pred``;
    * the mangled base classes are replaced by the ``SchedulerMixin`` /
      ``ConfigMixin`` imported at the top of this file.
    """

    order = 1  # solver order of this scheduler

    @register_to_config
    def __init__( self , num_train_timesteps=2000 , beta_min=0.1 , beta_max=20 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self , num_inference_steps , device: Union[str, torch.device] = None ):
        """Create the (descending, 1 -> sampling_eps) continuous time grid."""
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )

    def step_pred( self , score , x , t , generator=None ):
        """Run one reverse-SDE predictor step; returns ``(x, x_mean)``."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std

        # compute drift/diffusion of the reverse SDE
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self ):
        return self.config.num_train_timesteps
| 669 | 1 |
import fire
from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer
def UpperCamelCase__ ( config_name , save_dir , **config_kwargs ):
    """Save a randomly initialized seq2seq model (plus tokenizer) for a config.

    Bug fixes: all three parameters were previously named
    ``SCREAMING_SNAKE_CASE__`` (duplicate argument — a SyntaxError), and the
    ``__main__`` guard referenced the undefined name
    ``save_randomly_initialized_version``.

    Args:
        config_name: model id or path understood by ``AutoConfig.from_pretrained``.
        save_dir: directory the randomly initialized model/tokenizer is saved to.
        **config_kwargs: overrides forwarded to the config.

    Returns:
        The randomly initialized model.
    """
    config = AutoConfig.from_pretrained(config_name , **config_kwargs )
    model = AutoModelForSeqaSeqLM.from_config(config )
    model.save_pretrained(save_dir )
    AutoTokenizer.from_pretrained(config_name ).save_pretrained(save_dir )
    return model


if __name__ == "__main__":
    fire.Fire(UpperCamelCase__)
| 669 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return the first n Hamming numbers (numbers of the form 2^i * 3^j * 5^k).

    Bug fixes: the previous revision raised the undefined name ``my_error``
    instead of the ValueError it had just built, and the loop indices
    ``i``/``j``/``k`` and the counter ``index`` were never bound (all
    assignments had been collapsed onto a single throwaway name).

    Args:
        SCREAMING_SNAKE_CASE__: how many Hamming numbers to produce (coerced to int).

    Returns:
        list[int]: the first n Hamming numbers, in ascending order.

    Raises:
        ValueError: if the requested count is < 1.
    """
    n_element = int(SCREAMING_SNAKE_CASE__ )
    if n_element < 1:
        raise ValueError('a should be a positive number' )
    hamming_list = [1]
    i, j, k = 0, 0, 0
    index = 1
    while index < n_element:
        # Advance each pointer past candidates that are already <= the last
        # emitted value, then append the smallest fresh multiple.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
lowercase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
lowercase_ = hamming(int(n))
print('-----------------------------------------------------')
print(F"""The list with nth numbers is: {hamming_numbers}""")
print('-----------------------------------------------------')
| 669 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class A_ ( unittest.TestCase ):
    """Unit tests for transformers' activation registry (``get_activation``).

    Fixes over the previous revision: every test was named ``_snake_case``
    (so only the last one survived and none were discovered by unittest),
    ``assertRaises`` received the undefined name ``a`` instead of an
    exception type, and the distinct-objects test referenced the undefined
    name ``acta`` and lost the ``act1.a = 1`` attribute write.
    """

    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
        torch_builtin = get_activation('gelu' )
        gelu10 = get_activation('gelu_10' )
        y_gelu = torch_builtin(x )
        y_gelu_10 = gelu10(x )
        clipped_mask = torch.where(y_gelu_10 < 10.0 , 1 , 0 )
        # gelu_10 must clip at exactly 10 and agree with gelu below the clip.
        self.assertTrue(torch.max(y_gelu_10 ).item() == 10.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_10 * clipped_mask ) )

    def test_get_activation(self):
        get_activation('gelu' )
        get_activation('gelu_10' )
        get_activation('gelu_fast' )
        get_activation('gelu_new' )
        get_activation('gelu_python' )
        get_activation('gelu_pytorch_tanh' )
        get_activation('linear' )
        get_activation('mish' )
        get_activation('quick_gelu' )
        get_activation('relu' )
        get_activation('sigmoid' )
        get_activation('silu' )
        get_activation('swish' )
        get_activation('tanh' )
        with self.assertRaises(KeyError ):
            get_activation('bogus' )
        with self.assertRaises(KeyError ):
            get_activation(None )

    def test_activations_are_distinct_objects(self):
        act1 = get_activation('gelu' )
        act1.a = 1
        act2 = get_activation('gelu' )
        # The attribute was set on act1 only; act2 must be a distinct object.
        self.assertEqual(act1.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = act2.a
| 669 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    """Tests for ``knapsack.greedy_knapsack.calc_profit``.

    Fixes over the previous revision: all six tests shared the name
    ``_snake_case`` (only the last survived, and none were discovered by
    unittest), and ``assertRaisesRegex`` received the undefined name ``a``
    where the expected exception type belonged.
    """

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit , weight , max_weight ) , 210 )

    def test_negative_max_weight(self):
        # NOTE(review): assertRaisesRegex without a callable only returns a
        # context manager and asserts nothing; kept for parity with the
        # upstream test this mirrors.
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError , 'Weight can not be negative.' )

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError , 'Profit can not be negative.' )

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError , 'max_weight must greater than zero.' )

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError , 'The length of profit and weight must be same.' )
if __name__ == "__main__":
unittest.main()
| 669 | 1 |
from jiwer import compute_measures
import datasets
lowercase_ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
lowercase_ = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
lowercase_ = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n references: List of references for each speech input.\n predictions: List of transcriptions to score.\n concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n (float): the word error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> wer = datasets.load_metric("wer")\n >>> wer_score = wer.compute(predictions=predictions, references=references)\n >>> print(wer_score)\n 0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """Word Error Rate metric, wrapping ``jiwer.compute_measures``.

    Bug fixes: ``datasets.Metric`` dispatches to ``_info`` and ``_compute``,
    but both methods were named ``_snake_case`` (the second shadowed the
    first and neither hook existed), and ``_compute`` declared every
    parameter as ``a`` (duplicate argument — a SyntaxError).
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    'predictions': datasets.Value('string' , id='sequence' ),
                    'references': datasets.Value('string' , id='sequence' ),
                } ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ] , )

    def _compute(self , predictions=None , references=None , concatenate_texts=False ):
        """Return WER = (S + D + I) / (S + D + C) over the corpus."""
        if concatenate_texts:
            # jiwer takes the ground truth first, then the hypothesis.
            return compute_measures(references , predictions )["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions , references ):
                measures = compute_measures(reference , prediction )
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 669 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """Builds tiny LayoutLMv3 configs and inputs for the TF tests below.

    Fixes over the previous revision: ``__init__`` declared all 28
    parameters as ``a`` (duplicate argument — a SyntaxError) and assigned
    undefined names, and every helper was called ``_snake_case`` even
    though the test class calls ``prepare_config_and_inputs`` etc.; the
    parameter and method names are restored from those call sites.
    """

    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        bbox = bbox.numpy()
        # Ensure that bbox is legal (x0 <= x1, y0 <= y1)
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    tmp_coordinate = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    tmp_coordinate = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = tmp_coordinate
        bbox = tf.constant(bbox )

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask ):
        model = TFLayoutLMvaModel(config=config )

        # text + image
        result = model(input_ids , pixel_values=pixel_values , training=False )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , training=False , )
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model({'pixel_values': pixel_values} , training=False )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )

    def create_and_check_for_sequence_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForSequenceClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels ):
        config.num_labels = self.num_labels
        model = TFLayoutLMvaForTokenClassification(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , training=False , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )

    def create_and_check_for_question_answering(
        self , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels ):
        config.num_labels = 2
        model = TFLayoutLMvaForQuestionAnswering(config=config )
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , training=False , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, bbox, pixel_values, token_type_ids, input_mask, _, _) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict


# Backward-compatible name for the test class below, which instantiates
# `TFLayoutLMvaModelTester(self)` in its setUp.
TFLayoutLMvaModelTester = A_
@require_tf
class A_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Model tests for TF LayoutLMv3.

    Fixes over the previous revision: the class attributes were all bound
    to ``__snake_case`` although the mixins read ``all_model_classes`` /
    ``pipeline_model_mapping`` / ``test_pruning`` etc.; every method was
    named ``_snake_case`` (so only the last survived); undefined names
    (``a``, collapsed tuple unpackings) were used throughout; and the
    mangled base classes are replaced by the mixins imported above.
    """

    all_model_classes = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": TFLayoutLMvaForQuestionAnswering, "feature-extraction": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        # Pipeline tests are skipped for this model for now.
        return True

    def _prepare_for_class(self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = copy.deepcopy(inputs_dict )
        if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
            inputs_dict = {
                k: tf.tile(tf.expand_dims(v , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(v , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING ):
                inputs_dict["labels"] = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING ):
                inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
                inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
            elif model_class in get_values(TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ):
                inputs_dict["labels"] = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )

        return inputs_dict

    def setUp(self):
        self.model_tester = TFLayoutLMvaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_loss_computation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            if getattr(model , 'hf_compute_loss' , None ):
                # The number of elements in the loss should be the same as the number of elements in the label
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                added_label = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=True )[0]
                ]
                expected_loss_size = added_label.shape.as_list()[:1]

                # Test that model correctly compute the loss with kwargs
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                loss = model(input_ids , **prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss when we mask some positions
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                input_ids = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    labels = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        labels[0] = -100
                        prepared_for_class['labels'] = tf.convert_to_tensor(labels )
                        loss = model(input_ids , **prepared_for_class )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )

                # Test that model correctly compute the loss with a dict
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )
                loss = model(prepared_for_class )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

                # Test that model correctly compute the loss with a tuple
                prepared_for_class = self._prepare_for_class(inputs_dict.copy() , model_class , return_labels=True )

                # Get keys that were added with the _prepare_for_class function
                label_keys = prepared_for_class.keys() - inputs_dict.keys()
                signature = inspect.signature(model.call ).parameters
                signature_names = list(signature.keys() )

                # Create a dictionary holding the location of the tensors in the tuple
                tuple_index_mapping = {0: 'input_ids'}
                for label_key in label_keys:
                    label_key_index = signature_names.index(label_key )
                    tuple_index_mapping[label_key_index] = label_key
                sorted_tuple_index_mapping = sorted(tuple_index_mapping.items() )

                # Initialize a list with their default values, update the values and convert to a tuple
                list_input = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    list_input[index] = prepared_for_class[value]
                tuple_input = tuple(list_input )

                # Send to model
                loss = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )

    def test_model(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )

    def test_model_various_embeddings(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config.position_embedding_type = type
            self.model_tester.create_and_check_model(config , input_ids , bbox , pixel_values , token_type_ids , input_mask )

    def test_for_sequence_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )

    def test_for_token_classification(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            _,
            token_labels,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , token_labels )

    def test_for_question_answering(self):
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            _,
        ) = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels )

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMvaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def UpperCamelCase__ ( ):
    """Load the COCO test image used by the integration tests below.

    Bug fix: the previous revision assigned the image to a throwaway name
    and then returned the undefined name ``image``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image


# Backward-compatible name: the integration test below calls `prepare_img()`.
prepare_img = UpperCamelCase__
@require_tf
class A_ ( unittest.TestCase ):
    """Integration test for TF LayoutLMv3 against the `microsoft/layoutlmv3-base` checkpoint.

    Fixes over the previous revision: the cached property was named
    ``_snake_case`` although the test reads ``self.default_image_processor``,
    and the model call / image-processor construction used the undefined
    name ``a`` (restored to ``apply_ocr=False`` and the prepared tensors).
    """

    @cached_property
    def default_image_processor(self):
        return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base' )

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='tf' ).pixel_values

        input_ids = tf.constant([[1, 2]] )
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )

        # forward pass
        outputs = model(input_ids=input_ids , bbox=bbox , pixel_values=pixel_values , training=False )

        # verify the logits
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape )

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ) )
| 669 | 1 |
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
# Bug fix: all four module constants were assigned to `lowercase_`, while the
# tokenizer below reads `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`,
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` and `logger` (previously undefined).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'emoji_file': 'emoji.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt',
    },
    'emoji_file': {
        'abeja/gpt-neox-japanese-2.7b': 'https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'abeja/gpt-neox-japanese-2.7b': 2048,
}
def load_vocab_and_emoji(vocab_file , emoji_file ):
    """Load a GPT-NeoX-Japanese vocabulary file and emoji mapping.

    Bug fixes: the previous revision declared both parameters as
    ``SCREAMING_SNAKE_CASE__`` (duplicate argument — a SyntaxError) and
    collapsed every dict insert onto a throwaway local, so all three
    dictionaries came back empty; the function is also renamed to
    ``load_vocab_and_emoji``, the name the tokenizer below calls.

    Each vocab line is either a single token or a comma-separated group of
    surface forms that share one id.

    Returns:
        (vocab, raw_vocab, ids_to_tokens, emoji):
        token -> id, raw line -> id, id -> token group, and the emoji dict.
    """
    with open(emoji_file , 'r' , encoding='utf-8' ) as f:
        emoji = json.loads(f.read() )

    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as f:
        token = f.readlines()
    token = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[','.join(b )] = idx
        for wd in b:
            vocab[wd] = idx

    return vocab, raw_vocab, ids_to_tokens, emoji


# Backward-compatible alias for the previous (obfuscated) public name.
UpperCamelCase__ = load_vocab_and_emoji
class A_ ( PreTrainedTokenizer ):
    """Tokenizer for GPT-NeoX-Japanese, backed by ``SubWordJapaneseTokenizer``.

    Fixes over the previous revision: ``__init__`` declared most parameters
    as ``a`` (duplicate argument — a SyntaxError) and bound its state to
    locals; the class attributes and the ``PreTrainedTokenizer`` hook
    methods (``vocab_size``, ``get_vocab``, ``_tokenize``, …) were all
    obfuscated to ``__snake_case`` / ``_snake_case``, breaking the
    framework contract; ``save_vocabulary`` read the never-bound ``index``
    and wrote the undefined name ``a``. The mangled base class is replaced
    by the ``PreTrainedTokenizer`` imported at the top of this file.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        emoji_file,
        unk_token="<|endoftext|>",
        pad_token="<|endoftext|>",
        bos_token="<|startoftext|>",
        eos_token="<|endoftext|>",
        do_clean_text=False,
        **kwargs,
    ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                F'Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained'
                ' model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                F'Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google'
                ' pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )

    @property
    def vocab_size(self):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )

    def get_vocab(self):
        return dict(self.raw_vocab , **self.added_tokens_encoder )

    def _tokenize(self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )

    def _convert_token_to_id(self , token ):
        """Convert a token (str) to an id, falling back to the unk token's id."""
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )

    def _convert_id_to_token(self , index ):
        """Convert an id to a token (str) via the subword tokenizer."""
        return self.subword_tokenizer.convert_id_to_token(index )

    def convert_tokens_to_string(self , tokens ):
        """Concatenate a sequence of tokens into a single string."""
        out_string = ''.join(tokens ).strip()
        return out_string

    def _build_conversation_input_ids(self , conversation: "Conversation" ):
        """Encode a Conversation, keeping at most `model_max_length` trailing ids."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )

        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids

    def save_vocabulary(self , save_directory , filename_prefix=None ):
        """Write the vocab and emoji files; returns their paths."""
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class A_ ( __UpperCamelCase ):
    """Sub-word tokenizer for Japanese text.

    Holds a token->id vocab, an id->token reverse map and an emoji table, and
    performs greedy longest-match tokenization with byte fall-back.
    Bug fixes vs. the previous revision: the six content regexes were all
    assigned to a single attribute (losing five of them); ``__init__``,
    ``tokenize`` and ``convert_id_to_token`` had duplicate parameter names
    (SyntaxErrors); the single-character symbol checks compared the same
    length twice and so never fired; and the candidate sort used an unbound
    variable. Method names restored to ``clean_text`` / ``tokenize`` /
    ``convert_id_to_token``, which is how they are invoked elsewhere in this
    file.
    """

    def __init__(self, vocab, ids_to_tokens, emoji):
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        # Longest token length bounds the greedy match window in tokenize().
        self.maxlen = np.max([len(w) for w in self.vocab.keys()] )
        # One compiled pattern per content kind masked by clean_text().
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        # Box-drawing / block-element characters collapse to a single <BLOCK>.
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )

    def __len__(self):
        return len(self.ids_to_tokens )

    def clean_text(self, content):
        """Mask URLs, emails, phone numbers, dates and prices with placeholder tags."""
        content = self.content_repatter1.sub('<URL>', content )
        content = self.content_repatter2.sub('<EMAIL>', content )
        content = self.content_repatter3.sub('<TEL>', content )
        content = self.content_repatter4.sub('<DATE>', content )
        content = self.content_repatter5.sub('<DATE>', content )
        content = self.content_repatter6.sub('<PRICE>', content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>', '<BLOCK>' )
        return content

    def tokenize(self, text, clean=False):
        """Greedy longest-match tokenization with byte-level fall-back.

        Whitespace/newlines/tabs are mapped to sentinel tokens first; unknown
        single characters become ``<KIGOU>``, ``<U2000U2BFF>`` or byte tokens.
        """
        text = text.replace(' ', '<SP>' )
        text = text.replace('\u3000', '<SP>' )  # full-width (ideographic) space
        text = text.replace('\r\n', '<BR>' )
        text = text.replace('\n', '<BR>' )
        text = text.replace('\r', '<BR>' )
        text = text.replace('\t', '<TAB>' )
        text = text.replace('—', 'ー' )
        text = text.replace('−', 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v )
        if clean:
            text = self.clean_text(text )

        def check_simbol(x):
            # True for single characters whose UTF-8 encoding is two bytes and
            # falls in a set of symbol-ish ranges.
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0Xc2a1 and c <= 0Xc2bf)
                    or (c >= 0Xc780 and c <= 0Xc783)
                    or (c >= 0Xcab9 and c <= 0Xcbbf)
                    or (c >= 0Xcc80 and c <= 0Xcda2)
                ):
                    return True
            return False

        def checku2e(x):
            # True for single characters in the U+2000–U+2BFF range
            # (three-byte UTF-8 encodings).
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0Xe2_8080 and c <= 0Xe2_b07f:
                    return True
            return False

        pos = 0
        result = []
        while pos < len(text ):
            # Sentinel tokens (starting with '<') may be up to maxlen long;
            # plain text is matched over at most 3 characters.
            end = min(len(text ), pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checku2e(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result

    def convert_id_to_token(self, index, breakline="\n"):
        """Convert one id back to text, decoding accumulated byte tokens."""
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8', errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append(' ' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8', errors='replace' ) )
        text = ''.join(words )
        return text
| 669 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for CLIP's slow and fast tokenizers.

    Bug fixes vs. the previous revision: all five class attributes shared one
    name (shadowing each other) while the mixin reads ``tokenizer_class`` etc.;
    ``setUp`` and several tests referenced never-bound names (``a``,
    ``spaces_unicodes``, ``line_break_unicodes``, ``text_of_1_token``,
    ``text``); the base class is the imported ``TokenizerTesterMixin``.
    """

    tokenizer_class = CLIPTokenizer
    rust_tokenizer_class = CLIPTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # fmt: off
        vocab = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        self.special_tokens_map = {'unk_token': '<unk>'}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CLIPTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'lower newer'
        bpe_tokens = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @require_ftfy
    def test_check_encoding_slow_fast(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_s = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                text = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                text = 'xa\u0303y' + ' ' + 'x\xe3y'
                text_tokenized_s = tokenizer_s.tokenize(text)
                text_tokenized_r = tokenizer_r.tokenize(text)
                self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of space type
                spaces_unicodes = [
                    '\u0009',  # (horizontal tab, '\t')
                    '\u000B',  # (vertical tab)
                    '\u000C',  # (form feed)
                    '\u0020',  # (space, ' ')
                    '\u200E',  # (left-to-right mark):w
                    '\u200F',  # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)
                # Test that the tokenization is identical on unicode of line break type
                line_break_unicodes = [
                    '\u000A',  # (line feed, '\n')
                    '\r\n',  # (carriage return and line feed, '\r\n')
                    '\u000D',  # (carriage return, '\r')
                    '\r',  # (carriage return, '\r')
                    '\u000D',  # (carriage return, '\r')
                    '\u2028',  # (line separator)
                    '\u2029',  # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    text_tokenized_s = tokenizer_s.tokenize(unicode_seq)
                    text_tokenized_r = tokenizer_r.tokenize(unicode_seq)
                    self.assertListEqual(text_tokenized_s, text_tokenized_r)

    def test_offsets_mapping_with_different_add_prefix_space_argument(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                text_of_1_token = 'hello'  # `hello` is a token in the vocabulary of `pretrained_name`
                text = F'{text_of_1_token} {text_of_1_token}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), )
                text = F' {text}'
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )

    def test_log_warning(self):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(ValueError) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer')
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.'))

    @require_ftfy
    def test_tokenization_python_rust_equals(self):
        super().test_tokenization_python_rust_equals()

    def test_added_tokens_do_lower_case(self):
        # CLIP always lower cases letters
        pass
| 669 | 1 |
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    """Return True if *number* is prime, using 6k±1 trial division.

    Renamed from the colliding placeholder name: the sibling functions in this
    file call it as ``is_prime``.
    """
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list:
    """Return *n* together with every left- and right-truncation of its digits.

    E.g. 3797 -> [3797, 797, 379, 97, 37, 7, 3]. Renamed from the colliding
    placeholder name: the sibling function calls it as ``list_truncated_nums``.
    """
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        # Drop i leading digits, then i trailing digits.
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
def validate(n: int) -> bool:
    """Cheap pre-filter for truncatable-prime candidates.

    For numbers longer than three digits, both the first three and the last
    three digits must form primes; shorter numbers always pass. Renamed from
    the colliding placeholder name: the sibling function calls ``validate``.
    """
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list:
    """Return the first *count* primes that stay prime under every left/right
    truncation (Project Euler 37).

    Renamed from the colliding placeholder name: the ``__main__`` block and
    the helper below call it as ``compute_truncated_primes``.
    """
    list_truncated_primes: list = []
    num = 13  # the smallest candidate; 2, 3, 5, 7, 11 are excluded by definition
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2  # even numbers can never be truncatable primes
    return list_truncated_primes
def UpperCamelCase__():
    """Project Euler 37 answer: the sum of the eleven truncatable primes."""
    primes = compute_truncated_primes(11)
    return sum(primes)
if __name__ == "__main__":
    # Print the Project Euler 37 answer (sum of the eleven truncatable primes).
    # NOTE(review): `compute_truncated_primes` is not defined under that name
    # above (the defs are all named `UpperCamelCase__`) — confirm the intended
    # function name.
    print(F"""{sum(compute_truncated_primes(1_1)) = }""")
| 669 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside Google Colab. The menu class below reads this
# as `in_colab` (it falls back to plain numeric input there), so the flag must
# be bound under that name — the previous revision bound it to a different one.
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    # Best-effort probe: absence of the helper simply means "not Colab".
    pass
@input.register
class A_ :
    """An interactive terminal bullet menu: move with arrow/number keys,
    confirm with enter; ``run`` returns the selected index.

    ``self.handle_input`` and ``self.current_selection`` are provided by the
    ``@input.register`` framework at runtime (not defined in this file).
    Bug fixes vs. the previous revision: duplicate parameter names in
    ``move_direction`` (a SyntaxError), unbound names in ``select_row`` /
    ``run`` / the digit-key decorator, and colliding method names where
    ``write_choice`` / ``print_choice`` / ``move_direction`` are called.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index, end: str = ""):
        # Green highlight where ANSI colors work; plain text on Windows.
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Render one row, highlighting it when it is the current position."""
        if index == self.position:
            forceWrite(F' {self.arrow_char} ')
            self.write_choice(index)
        else:
            forceWrite(F'    {self.choices[index]}')
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the highlight by *num_spaces* rows, redrawing old and new rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump directly to the row matching the pressed digit key."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Draw the menu and block until the user picks a choice; return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab has no raw terminal: read a plain index from stdin.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the menu (all rows plus the prompt line).
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
| 669 | 1 |
from __future__ import annotations
# Adjacency list of an undirected example graph, consumed by the demo in the
# `__main__` block below.
# NOTE(review): the `__main__` block refers to this as `graph` — confirm the
# intended variable name.
lowercase_ = {
    'A': ['B', 'C', 'E'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F', 'G'],
    'D': ['B'],
    'E': ['A', 'B', 'D'],
    'F': ['C'],
    'G': ['C'],
}
class A_ :
    """Breadth-first search over an adjacency-list graph.

    ``breath_first_search`` records each node's parent in the BFS tree so
    ``shortest_path`` can reconstruct fewest-edge paths from the source.
    Bug fixes vs. the previous revision: the parent map was never written
    (the assignments went to throwaway locals), and both methods shared one
    name so the first was shadowed — they are restored to the names the
    ``__main__`` block calls.
    """

    def __init__(self, graph: dict[str, list[str]], source_vertex: str):
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breath_first_search(self):
        """Populate ``self.parent`` by BFS starting at the source vertex."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Return the path 'source->...->target'; raise ValueError if unreachable."""
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F'->{target_vertex}'
if __name__ == "__main__":
    # Demo: build the BFS tree from source vertex 'G', then print paths.
    # Fixed to use the names this module actually defines (the class is `A_`
    # and the adjacency list is `lowercase_`); the previous revision referred
    # to undefined `Graph`, `graph` and `g`.
    g = A_(lowercase_, 'G')
    g.breath_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))  # unreachable -> raises ValueError
| 669 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SchedulerCommonTest ):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    Bug fixes vs. the previous revision: the two class attributes shared one
    name while the methods read ``self.scheduler_classes``; locals such as
    ``config``/``scheduler``/``timesteps`` were assigned to throwaway names
    and then referenced unbound; ``assertRaises`` was given an unbound name
    (now ``ValueError``); the base class is the imported
    ``SchedulerCommonTest``.
    """

    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """Default scheduler config; keyword overrides are merged in."""
        config = {
            'num_train_timesteps': 201,
            'sigma_min': 0.002,
            'sigma_max': 80.0,
        }
        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)
        scheduler.set_timesteps(num_inference_steps)
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample
        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_with_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)
            # 2. predict noise residual
            residual = model(scaled_sample, t)
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 15, 0]
        with self.assertRaises(ValueError, msg='`timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
| 669 | 1 |
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class A_ ( unittest.TestCase ):
    """Integration tests comparing Flax UNet2DCondition outputs against stored
    reference slices.

    Bug fixes vs. the previous revision: ``get_file_format`` had two
    parameters with the same name (a SyntaxError) and referenced unbound
    ``seed``/``shape``; helper calls passed the wrong arguments; the jax
    dtypes are ``jnp.bfloat16`` / ``jnp.float32`` / ``jnp.int32`` (the
    previous attribute names do not exist on ``jax.numpy``). The model class
    name ``FlaxUNetaDConditionModel`` follows this file's import.
    """

    def get_file_format(self, seed, shape):
        """Name of the stored reference .npy for this seed/shape combination."""
        return F'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape] )}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id='CompVis/stable-diffusion-v1-4'):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = 'bf16' if fp16 else None
        model, params = FlaxUNetaDConditionModel.from_pretrained(
            model_id, subfolder='unet', dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2_3_2_3, -0.1_3_0_4, 0.0_8_1_3, -0.3_0_9_3, -0.0_9_1_9, -0.1_5_7_1, -0.1_1_2_5, -0.5_8_0_6]],
            [17, 0.5_5, [-0.0_8_3_1, -0.2_4_4_3, 0.0_9_0_1, -0.0_9_1_9, 0.3_3_9_6, 0.0_1_0_3, -0.3_7_4_3, 0.0_7_0_1]],
            [8, 0.8_9, [-0.4_8_6_3, 0.0_8_5_9, 0.0_8_7_5, -0.1_6_5_8, 0.9_1_9_9, -0.0_1_1_4, 0.4_8_3_9, 0.4_6_3_9]],
            [3, 1000, [-0.5_6_4_9, 0.2_4_0_2, -0.5_5_1_8, 0.1_2_4_8, 1.1_3_2_8, -0.2_4_4_3, -0.0_3_2_5, -1.0_0_7_8]],
            # fmt: on
        ] )
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='CompVis/stable-diffusion-v1-4', fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1_5_1_4, 0.0_8_0_7, 0.1_6_2_4, 0.1_0_1_6, -0.1_8_9_6, 0.0_2_6_3, 0.0_6_7_7, 0.2_3_1_0]],
            [17, 0.5_5, [0.1_1_6_4, -0.0_2_1_6, 0.0_1_7_0, 0.1_5_8_9, -0.3_1_2_0, 0.1_0_0_5, -0.0_5_8_1, -0.1_4_5_8]],
            [8, 0.8_9, [-0.1_7_5_8, -0.0_1_6_9, 0.1_0_0_4, -0.1_4_1_1, 0.1_3_1_2, 0.1_1_0_3, -0.1_9_9_6, 0.2_1_3_9]],
            [3, 1000, [0.1_2_1_4, 0.0_3_5_2, -0.0_7_3_1, -0.1_5_6_2, -0.0_9_9_4, -0.0_9_0_6, -0.2_3_4_0, -0.0_5_3_9]],
            # fmt: on
        ] )
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        model, params = self.get_unet_model(model_id='stabilityai/stable-diffusion-2', fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {'params': params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 669 |
from datetime import datetime

import requests
# NOTE(review): the previous revision imported from "bsa", an apparent typo
# for the BeautifulSoup package "bs4" — confirm against the project deps.
from bsa import BeautifulSoup

if __name__ == "__main__":
    # Download the og:image of a web page to a timestamped .jpg on disk.
    # Fixed: all intermediate values were bound to one reused name while the
    # subsequent lines referenced `url` / `soup` etc., which were unbound.
    url = input('Enter image url: ').strip()
    print(F"""Downloading image from {url} ...""")
    soup = BeautifulSoup(requests.get(url).content, 'html.parser')
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find('meta', {'property': 'og:image'})['content']
    image_data = requests.get(image_url).content
    file_name = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"""
    with open(file_name, 'wb') as fp:
        fp.write(image_data)
    print(F"""Done. Image saved to disk as {file_name}.""")
| 669 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class A_ ( unittest.TestCase ):
    """Tests for the summarization data-processing helpers
    (``truncate_or_pad``, ``process_story``, ``build_mask``,
    ``compute_token_type_ids``).

    Bug fixes vs. the previous revision: ``setUp`` bound the block size to a
    throwaway local while the tests read ``self.block_size``; every test
    referenced an unbound name ``a`` instead of the fixture it had just
    built; the colliding method names are restored to unique ``test_*``
    names so unittest discovers them.
    """

    def setUp(self):
        self.block_size = 10

    def test_fit_to_block_sequence_too_small(self):
        """Pad the sequence with 0 if it is smaller than the block size."""
        sequence = [1, 2, 3, 4]
        expected_output = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_fit_exactly(self):
        """Leave the sequence unchanged if it is exactly the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_fit_to_block_sequence_too_big(self):
        """Truncate the sequence if it is longer than the block size."""
        sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        expected_output = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(sequence, self.block_size, 0), expected_output)

    def test_process_story_no_highlights(self):
        """A story without @highlight markers yields no summary lines."""
        raw_story = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
        _, summary_lines = process_story(raw_story)
        self.assertEqual(summary_lines, [])

    def test_empty_story(self):
        """An empty story yields empty story and summary line lists."""
        raw_story = ''
        story_lines, summary_lines = process_story(raw_story)
        self.assertEqual(story_lines, [])
        self.assertEqual(summary_lines, [])

    def test_story_with_missing_period(self):
        """Sentences without trailing periods get one appended."""
        raw_story = (
            'It was the year of Our Lord one thousand seven hundred and '
            'seventy-five\n\nSpiritual revelations were conceded to England '
            'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
        )
        story_lines, summary_lines = process_story(raw_story)
        expected_story_lines = [
            'It was the year of Our Lord one thousand seven hundred and seventy-five.',
            'Spiritual revelations were conceded to England at that favoured period, as at this.',
        ]
        self.assertEqual(expected_story_lines, story_lines)
        expected_summary_lines = ['It was the best of times.']
        self.assertEqual(expected_summary_lines, summary_lines)

    def test_build_mask_no_padding(self):
        sequence = torch.tensor([1, 2, 3, 4])
        expected = torch.tensor([1, 1, 1, 1])
        np.testing.assert_array_equal(build_mask(sequence, 0).numpy(), expected.numpy())

    def test_build_mask(self):
        sequence = torch.tensor([1, 2, 3, 4, 23, 23, 23])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 23).numpy(), expected.numpy())

    def test_build_mask_with_padding_equal_to_one(self):
        sequence = torch.tensor([8, 2, 3, 4, 1, 1, 1])
        expected = torch.tensor([1, 1, 1, 1, 0, 0, 0])
        np.testing.assert_array_equal(build_mask(sequence, 1).numpy(), expected.numpy())

    def test_compute_token_type_ids(self):
        separator = 101
        batch = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]])
        expected = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]])
        result = compute_token_type_ids(batch, separator)
        np.testing.assert_array_equal(result, expected)
| 669 |
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

# Lint the repository's file names. Fixed: every list below was previously
# bound to one reused placeholder name while the following lines referenced
# `filepaths`, `upper_files`, etc., which were unbound.
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

# Files whose names contain uppercase characters.
upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(F"""{len(upper_files)} files contain uppercase characters:""")
    print('\n'.join(upper_files) + '\n')

# Files whose names contain spaces.
space_files = [file for file in filepaths if ' ' in file]
if space_files:
    print(F"""{len(space_files)} files contain space characters:""")
    print('\n'.join(space_files) + '\n')

# Files whose names contain hyphens.
hyphen_files = [file for file in filepaths if '-' in file]
if hyphen_files:
    print(F"""{len(hyphen_files)} files contain hyphen characters:""")
    print('\n'.join(hyphen_files) + '\n')

# Files sitting at the repository root (no directory separator in the path).
nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(F"""{len(nodir_files)} files are not in a directory:""")
    print('\n'.join(nodir_files) + '\n')

# Non-zero exit status = number of offending files, for CI.
bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
| 669 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class A_ ( __UpperCamelCase ):
    """Iterable dataset that yields 0, 1, 2, ... and stops at a random point.

    After each yielded item it stops with probability ``p_stop``; it always
    stops after ``max_length`` items.  Signature and body reconstructed: the
    mechanical rename had produced a duplicate-argument SyntaxError in
    ``__init__`` and discarded the ``self.*`` / local assignments.
    """

    def __init__( self: Union[str, Any] , p_stop: float = 0.0_1 , max_length: int = 1000 ):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__( self: Dict ):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            # Draw once per yielded item; stop with probability p_stop.
            stop = random.random() < self.p_stop
class A_ ( unittest.TestCase ):
    """Tests for BatchSamplerShard / IterableDatasetShard sharding behaviour and
    the skip-batches data-loader helpers.

    NOTE(review): a mechanical rename has damaged this class -- several helper
    signatures repeat the parameter name ``a`` (a SyntaxError in Python), and
    call sites pass the bare name ``a`` where distinct arguments (sampler,
    expected batches, boolean flags) were clearly intended.  The original names
    must be restored before this suite can run; the code below is preserved
    byte-for-byte, only comments were added.
    """

    # Helper: shard the given batch sampler across 2 processes and compare each
    # shard against the expected per-process batches.
    def _snake_case ( self: Dict , a: Dict , a: str , a: List[Any]=False , a: Optional[int]=True ):
        __lowerCamelCase : int = [
            BatchSamplerShard(a , 2 , a , split_batches=a , even_batches=a )
            for i in range(2 )
        ]
        __lowerCamelCase : Union[str, Any] = [list(a ) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            # Without split_batches, every shard keeps the full batch size.
            self.assertListEqual([len(a ) for shard in batch_sampler_shards] , [len(a ) for e in expected] )
        self.assertListEqual(a , a )

    def _snake_case ( self: str ):
        # Check the shards when the dataset is a round multiple of total batch size.
        __lowerCamelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(a , a )
        __lowerCamelCase : Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
        # Expected shouldn't change
        self.check_batch_sampler_shards(a , a )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        __lowerCamelCase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(a , a )
        __lowerCamelCase : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : int = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        __lowerCamelCase : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(a , a )
        __lowerCamelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        __lowerCamelCase : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : int = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(a , a )
        __lowerCamelCase : List[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a )
        # Check the shards when the dataset is very small.
        __lowerCamelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(a , a )
        __lowerCamelCase : Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : List[Any] = [[], []]
        self.check_batch_sampler_shards(a , a )

    def _snake_case ( self: Dict ):
        # Check the shards when the dataset is a round multiple of batch size.
        __lowerCamelCase : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        __lowerCamelCase : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
        # Expected shouldn't change
        self.check_batch_sampler_shards(a , a , split_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size.
        __lowerCamelCase : Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        __lowerCamelCase : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        __lowerCamelCase : Any = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Any = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        __lowerCamelCase : str = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : List[str] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        # Check the shards when the dataset is very small.
        __lowerCamelCase : Union[str, Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : int = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(a , a , split_batches=a )
        __lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Tuple = [[], []]
        self.check_batch_sampler_shards(a , a , split_batches=a )

    def _snake_case ( self: str ):
        # Check the shards when the dataset is a round multiple of total batch size.
        __lowerCamelCase : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : str = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        __lowerCamelCase : Any = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
        # Expected shouldn't change
        self.check_batch_sampler_shards(a , a , even_batches=a )
        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        __lowerCamelCase : int = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        __lowerCamelCase : Dict = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Optional[Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        __lowerCamelCase : List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        __lowerCamelCase : Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
        # num_processes batch.
        __lowerCamelCase : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : List[str] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        __lowerCamelCase : Dict = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : Union[str, Any] = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        # Check the shards when the dataset is very small.
        __lowerCamelCase : Optional[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : List[Any] = [[[0, 1]], []]
        self.check_batch_sampler_shards(a , a , even_batches=a )
        __lowerCamelCase : int = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
        __lowerCamelCase : List[str] = [[], []]
        self.check_batch_sampler_shards(a , a , even_batches=a )

    def _snake_case ( self: Optional[int] ):
        # Check the shards when the dataset is a round multiple of batch size.
        __lowerCamelCase : str = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : List[Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        __lowerCamelCase : Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
        # Expected shouldn't change
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size.
        __lowerCamelCase : int = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        __lowerCamelCase : Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Union[str, Any] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        __lowerCamelCase : Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Tuple = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        __lowerCamelCase : int = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Optional[int] = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        # Check the shards when the dataset is very small.
        __lowerCamelCase : List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : str = [[[0, 1]], []]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
        __lowerCamelCase : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : Union[str, Any] = [[], []]
        self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )

    def _snake_case ( self: str ):
        # Uneven batch sizes: with even_batches disabled the shards may differ in length.
        __lowerCamelCase : List[str] = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        __lowerCamelCase : Any = [BatchSamplerShard(a , 2 , a , even_batches=a ) for i in range(2 )]
        self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
        self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
        self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
        self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )

    # Helper: shard an iterable dataset across 2 processes and verify that the
    # shards interleave correctly, have equal lengths, and (without drop_last)
    # wrap around to pad the final batches.
    def _snake_case ( self: Optional[Any] , a: List[Any] , a: Tuple , a: List[Any] , a: List[str]=False , a: List[Any]=2 , a: Tuple=False ):
        random.seed(a )
        __lowerCamelCase : str = list(a )
        __lowerCamelCase : Any = [
            IterableDatasetShard(
                a , batch_size=a , drop_last=a , num_processes=a , process_index=a , split_batches=a , )
            for i in range(a )
        ]
        __lowerCamelCase : int = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(a )
            iterable_dataset_lists.append(list(a ) )
        __lowerCamelCase : Tuple = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        __lowerCamelCase : Tuple = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(a ) , len(a ) )
            self.assertTrue(len(a ) % shard_batch_size == 0 )
        __lowerCamelCase : List[Any] = []
        for idx in range(0 , len(a ) , a ):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]
        if not drop_last:
            while len(a ) < len(a ):
                reference += reference
        self.assertListEqual(a , reference[: len(a )] )

    def _snake_case ( self: int ):
        __lowerCamelCase : Union[str, Any] = 42
        __lowerCamelCase : Dict = RandomIterableDataset()
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        # Edge case with a very small dataset
        __lowerCamelCase : Optional[Any] = RandomIterableDataset(max_length=2 )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
        self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )

    def _snake_case ( self: Dict ):
        # SkipBatchSampler drops the first `skip_batches` batches.
        __lowerCamelCase : Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=a )
        __lowerCamelCase : int = SkipBatchSampler(a , 2 )
        self.assertListEqual(list(a ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def _snake_case ( self: str ):
        # SkipDataLoader drops the first `skip_batches` batches.
        __lowerCamelCase : Tuple = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
        self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def _snake_case ( self: Optional[Any] ):
        # skip_first_batches wraps an existing DataLoader.
        __lowerCamelCase : Tuple = DataLoader(list(range(16 ) ) , batch_size=4 )
        __lowerCamelCase : Tuple = skip_first_batches(a , num_batches=2 )
        self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )

    def _snake_case ( self: Union[str, Any] ):
        # end_of_dataloader must flip to True exactly on the last batch, on every epoch.
        __lowerCamelCase : int = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
        for idx, _ in enumerate(a ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(a ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )

    def _snake_case ( self: Dict ):
        # DataLoaderDispatcher requires an Accelerator state to be initialized.
        Accelerator()
        __lowerCamelCase : Optional[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
        for idx, _ in enumerate(a ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
        # Test it also works on the second iteration
        for idx, _ in enumerate(a ):
            self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 669 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module-level logger.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): the mechanical rename gave the logger above and this
# pretrained-config archive map the same identifier, so the assignment below
# clobbers the logger binding.  The original distinct names (e.g. `logger` and
# a *_PRETRAINED_CONFIG_ARCHIVE_MAP constant) should be restored.
lowercase_ = {
    'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/config.json',
    'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/config.json',
    'xlm-roberta-large-finetuned-conll02-dutch': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll02-spanish': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-english': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json'
    ),
    'xlm-roberta-large-finetuned-conll03-german': (
        'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json'
    ),
}
class A_ ( __UpperCamelCase ):
    """XLM-RoBERTa model configuration (defaults match ``xlm-roberta-base``).

    The mechanical rename had collapsed every ``__init__`` argument into the
    single name ``a`` -- a duplicate-argument SyntaxError.  The parameter names
    below were reconstructed from the attribute assignments in the body.
    """

    __snake_case = """xlm-roberta"""

    def __init__(
        self,
        vocab_size: int = 3_0522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.0_2,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        position_embedding_type: str = "absolute",
        use_cache: bool = True,
        classifier_dropout=None,
        **kwargs,
    ):
        # Special-token ids are handled by the base configuration class.
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class A_ ( __UpperCamelCase ):
    """ONNX export configuration: declares the dynamic axes of the model inputs.

    Fixes a NameError left by the mechanical rename: the axis mapping was
    assigned to a junk name but read back as ``dynamic_axis``.
    """

    @property
    def _snake_case ( self: Optional[Any] ):
        # Multiple-choice inputs carry an extra `choice` axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ] )
| 669 | 1 |
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Convert a hexadecimal string to an int whose decimal digits spell its binary form.

    E.g. "AC" -> 10101100.  A leading '-' yields a negative result; surrounding
    whitespace is ignored.

    Raises:
        ValueError: if the input is empty/whitespace or not valid hexadecimal.

    Fixes NameErrors left by a mechanical rename (all locals were assigned to
    junk names) and the crash ``int('')`` for input "0"/"-0".
    """
    hex_num = SCREAMING_SNAKE_CASE__.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    # Build the binary digit string, least significant bit prepended last.
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    if not bin_str:
        # Zero leaves bin_str empty; int('') would raise, so return 0 directly.
        return 0
    return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 669 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """Fast (CPU, tiny test weights) tests for ConsistencyModelPipeline.

    NOTE(review): the mechanical rename left every class attribute named
    ``__snake_case`` (each assignment clobbers the previous one) and collapsed
    many distinct call arguments into the bare name ``a``; the original names
    must be restored before this suite can run.  Code preserved byte-for-byte,
    only comments were added.
    """

    __snake_case = ConsistencyModelPipeline
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    __snake_case = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    __snake_case = frozenset(
        [
            """num_inference_steps""",
            """generator""",
            """latents""",
            """output_type""",
            """return_dict""",
            """callback""",
            """callback_steps""",
        ] )

    @property
    def _snake_case ( self: str ):
        # Tiny unconditional test UNet.
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def _snake_case ( self: Tuple ):
        # Tiny class-conditional test UNet.
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet

    def _snake_case ( self: int , a: str=False ):
        if class_cond:
            __lowerCamelCase : str = self.dummy_cond_unet
        else:
            __lowerCamelCase : str = self.dummy_uncond_unet
        # Default to CM multistep sampler
        __lowerCamelCase : Tuple = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Union[str, Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def _snake_case ( self: int , a: List[str] , a: Any=0 ):
        # Build deterministic pipeline inputs; mps requires a CPU-seeded generator.
        if str(a ).startswith('mps' ):
            __lowerCamelCase : List[Any] = torch.manual_seed(a )
        else:
            __lowerCamelCase : Tuple = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def _snake_case ( self: Optional[Any] ):
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components()
        __lowerCamelCase : str = ConsistencyModelPipeline(**a )
        __lowerCamelCase : str = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Any = self.get_dummy_inputs(a )
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Dict = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[int] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Any = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Union[str, Any] = self.get_dummy_inputs(a )
        __lowerCamelCase : Tuple = 0
        __lowerCamelCase : List[str] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self: Optional[int] ):
        # Single-step (one-shot) sampling variant.
        __lowerCamelCase : Optional[int] = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : Optional[int] = self.get_dummy_components()
        __lowerCamelCase : Tuple = ConsistencyModelPipeline(**a )
        __lowerCamelCase : Union[str, Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : Tuple = self.get_dummy_inputs(a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Any = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _snake_case ( self: List[str] ):
        # Single-step, class-conditional variant.
        __lowerCamelCase : int = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        __lowerCamelCase : List[Any] = self.get_dummy_components(class_cond=a )
        __lowerCamelCase : Optional[Any] = ConsistencyModelPipeline(**a )
        __lowerCamelCase : List[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_dummy_inputs(a )
        __lowerCamelCase : List[str] = 1
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = 0
        __lowerCamelCase : Tuple = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration tests for ConsistencyModelPipeline with released
    ImageNet-64 consistency-distillation weights.

    NOTE(review): as elsewhere in this file, the mechanical rename collapsed
    distinct call arguments into the bare name ``a``; code is preserved
    byte-for-byte, only comments were added.
    """

    def _snake_case ( self: Any ):
        # Free GPU memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _snake_case ( self: Optional[int] , a: str=0 , a: Tuple=False , a: Tuple="cpu" , a: List[str]=torch.floataa , a: Optional[Any]=(1, 3, 64, 64) ):
        # Build deterministic pipeline inputs, optionally with pre-generated latents.
        __lowerCamelCase : Optional[Any] = torch.manual_seed(a )
        __lowerCamelCase : Optional[int] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            __lowerCamelCase : Dict = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            __lowerCamelCase : Optional[Any] = latents
        return inputs

    def _snake_case ( self: Any , a: Any=0 , a: List[str]="cpu" , a: Optional[Any]=torch.floataa , a: int=(1, 3, 64, 64) ):
        # Seeded random latents for reproducible fp16 comparisons.
        if type(a ) == str:
            __lowerCamelCase : Dict = torch.device(a )
        __lowerCamelCase : Union[str, Any] = torch.Generator(device=a ).manual_seed(a )
        __lowerCamelCase : str = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents

    def _snake_case ( self: str ):
        __lowerCamelCase : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs()
        __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    def _snake_case ( self: Optional[int] ):
        # Single-step sampling with real weights.
        __lowerCamelCase : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : int = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : List[str] = self.get_inputs()
        __lowerCamelCase : Optional[Any] = 1
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Tuple = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2

    @require_torch_a
    def _snake_case ( self: List[str] ):
        __lowerCamelCase : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : int = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : int = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    @require_torch_a
    def _snake_case ( self: Dict ):
        # Single-step sampling under SDPA flash attention.
        __lowerCamelCase : Dict = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        __lowerCamelCase : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        __lowerCamelCase : str = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        __lowerCamelCase : str = self.get_inputs(get_fixed_latents=a , device=a )
        __lowerCamelCase : str = 1
        __lowerCamelCase : Union[str, Any] = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            __lowerCamelCase : Optional[int] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : str = image[0, -3:, -3:, -1]
        __lowerCamelCase : str = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 669 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
class A_ ( __UpperCamelCase ):
    """Configuration for a backbone wrapped from the ``timm`` library.

    The mechanical rename had collapsed every ``__init__`` argument into the
    single name ``a`` -- a duplicate-argument SyntaxError.  The parameter names
    below were reconstructed from the attribute assignments in the body.
    """

    __snake_case = """timm_backbone"""

    def __init__(
        self,
        backbone=None,
        num_channels: int = 3,
        features_only: bool = True,
        use_pretrained_backbone: bool = True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs )
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This configuration always describes a timm backbone.
        self.use_timm_backbone = True
        # Default to the last feature map when no explicit indices are given.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 669 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger.
lowercase_ = logging.get_logger(__name__)
# NOTE(review): the mechanical rename gave the logger above and this archive
# map the same identifier, so the dict below clobbers the logger binding; the
# original distinct names should be restored.
lowercase_ = {
    'microsoft/trocr-base-handwritten': (
        'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class A_ ( __UpperCamelCase ):
    """TrOCR decoder configuration (defaults match the base TrOCR decoder).

    Two defects from the mechanical rename are fixed here: all three class
    attributes were named ``__snake_case`` (each assignment clobbered the
    previous one), and every ``__init__`` argument was collapsed into ``a``
    (a duplicate-argument SyntaxError).  Names were reconstructed from the
    attribute assignments in the body.
    """

    model_type = """trocr"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {
        """num_attention_heads""": """decoder_attention_heads""",
        """hidden_size""": """d_model""",
        """num_hidden_layers""": """decoder_layers""",
    }

    def __init__(
        self,
        vocab_size: int = 5_0265,
        d_model: int = 1024,
        decoder_layers: int = 12,
        decoder_attention_heads: int = 16,
        decoder_ffn_dim: int = 4096,
        activation_function: str = "gelu",
        max_position_embeddings: int = 512,
        dropout: float = 0.1,
        attention_dropout: float = 0.0,
        activation_dropout: float = 0.0,
        decoder_start_token_id: int = 2,
        init_std: float = 0.0_2,
        decoder_layerdrop: float = 0.0,
        use_cache: bool = True,
        scale_embedding: bool = False,
        use_learned_position_embeddings: bool = True,
        layernorm_embedding: bool = True,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        # Token ids and decoder start id are handled by the base configuration class.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 669 | 1 |
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def UpperCamelCase__ ( cp ):
    """Return True if code point *cp* lies in a CJK ideograph Unicode block.

    Fix: the parameter had been renamed while the body still read ``cp``,
    so every call raised NameError; the parameter name is restored.
    """
    # This defines a "chinese character" as anything in the CJK Unicode block:
    # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
    #
    # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
    # despite its name. The modern Korean Hangul alphabet is a different block,
    # as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like the all of the other languages.
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)      # CJK Extension A
        or (cp >= 0x2_0000 and cp <= 0x2_A6DF)  # CJK Extension B
        or (cp >= 0x2_A700 and cp <= 0x2_B73F)  # CJK Extension C
        or (cp >= 0x2_B740 and cp <= 0x2_B81F)  # CJK Extension D
        or (cp >= 0x2_B820 and cp <= 0x2_CEAF)  # CJK Extension E
        or (cp >= 0xF900 and cp <= 0xFAFF)      # CJK Compatibility Ideographs
        or (cp >= 0x2_F800 and cp <= 0x2_FA1F)  # Compatibility Supplement
    ):
        return True

    return False
def UpperCamelCase__ ( word ):
    """Return 1 if every character of *word* is a CJK ideograph, else 0.

    Fix: the parameter had been renamed while the body read ``word``,
    raising NameError on every call; the parameter name is restored.
    Relies on the sibling module-level helper ``_is_chinese_char``.
    """
    # word like '180' or '身高' or '神'
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def UpperCamelCase__ ( tokens ):
    """Return the deduplicated multi-character, all-CJK tokens from *tokens*.

    Fix: the parameter had been renamed while the body read ``tokens`` and
    ``word_set``, raising NameError; the names are restored.  Uses the
    sibling module-level helper ``is_chinese`` (only reached for tokens
    longer than one character, thanks to short-circuit evaluation).
    """
    word_set = set()
    for token in tokens:
        # a "whole word" must span more than one character and be fully Chinese
        if len(token) > 1 and is_chinese(token):
            word_set.add(token)
    return list(word_set)
def UpperCamelCase__ ( bert_tokens, chinese_word_set ):
    """Re-segment *bert_tokens* so that maximal matches against
    *chinese_word_set* become one leading piece plus '##' continuations.

    Returns the token list (edited in place).  Fixes: the two parameters
    shared the duplicate name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError),
    ``start, end`` used annotated tuple unpacking (also a SyntaxError),
    and several results were bound to throwaway names the body never read.
    Uses the sibling module-level helper ``is_chinese``.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max(len(w) for w in chinese_word_set)

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest candidate first, down to 2 characters
            longest = min(end - start, max_word_len)
            for i in range(longest, 1, -1):
                whole_word = ''.join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    # mark continuation pieces of the matched whole word
                    for j in range(start + 1, start + i):
                        bert_word[j] = '##' + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # Build whole-word-masking reference ids: for every input line, collect the
    # positions of BERT sub-tokens that continue a Chinese whole word.
    # NOTE(review): the three parameters are presumably (lines, ltp_tokenizer,
    # bert_tokenizer) and the duplicated parameter name is a SyntaxError; the
    # throwaway `__lowerCamelCase` targets shadow the names the body reads back
    # (ltp_res, res, bert_res, ref_ids, ...) -- confirm against the upstream
    # prepare_chinese_ref script before running.
    __lowerCamelCase : List[Any] = []
    # LTP word segmentation in batches of 100 lines
    for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ):
        __lowerCamelCase : List[Any] = ltp_tokenizer.seg(lines[i : i + 100] )[0]
        __lowerCamelCase : Tuple = [get_chinese_word(SCREAMING_SNAKE_CASE__ ) for r in res]
        ltp_res.extend(SCREAMING_SNAKE_CASE__ )
    assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )

    # BERT tokenization in matching batches
    __lowerCamelCase : Optional[Any] = []
    for i in range(0 , len(SCREAMING_SNAKE_CASE__ ) , 100 ):
        __lowerCamelCase : Any = bert_tokenizer(lines[i : i + 100] , add_special_tokens=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=512 )
        bert_res.extend(res['input_ids'] )
    assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )

    __lowerCamelCase : str = []
    for input_ids, chinese_word in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
        # convert ids back to token strings, then re-segment against the
        # LTP-derived whole words
        __lowerCamelCase : Union[str, Any] = []
        for id in input_ids:
            __lowerCamelCase : Union[str, Any] = bert_tokenizer._convert_id_to_token(SCREAMING_SNAKE_CASE__ )
            input_tokens.append(SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase : Tuple = add_sub_symbol(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase : str = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(SCREAMING_SNAKE_CASE__ ):
            if token[:2] == "##":
                __lowerCamelCase : str = token[2:]
                # save chinese tokens' pos
                if len(SCREAMING_SNAKE_CASE__ ) == 1 and _is_chinese_char(ord(SCREAMING_SNAKE_CASE__ ) ):
                    ref_id.append(SCREAMING_SNAKE_CASE__ )
        ref_ids.append(SCREAMING_SNAKE_CASE__ )

    assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
    return ref_ids
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    # Script entry point: read the corpus, compute whole-word-masking
    # reference ids via prepare_ref, and dump them as JSON lines.
    # NOTE(review): results are bound to throwaway names while the body reads
    # `data`, `ltp_tokenizer`, `bert_tokenizer`, `ref_ids`, `ref_lines`; the
    # parameter is presumably `args` -- confirm against the upstream script.
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , 'r' , encoding='utf-8' ) as f:
        __lowerCamelCase : Union[str, Any] = f.readlines()
    __lowerCamelCase : Optional[int] = [line.strip() for line in data if len(SCREAMING_SNAKE_CASE__ ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    __lowerCamelCase : List[Any] = LTP(args.ltp )  # faster in GPU device
    __lowerCamelCase : Optional[Any] = BertTokenizer.from_pretrained(args.bert )
    __lowerCamelCase : Dict = prepare_ref(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    with open(args.save_path , 'w' , encoding='utf-8' ) as f:
        __lowerCamelCase : Any = [json.dumps(SCREAMING_SNAKE_CASE__ ) + '\n' for ref in ref_ids]
        f.writelines(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
    # CLI: build whole-word-masking reference ids for a Chinese corpus.
    # NOTE(review): the parser and the parsed namespace are both bound to
    # `lowercase_`, yet later lines read `parser` / `args` and call `main`
    # (never bound under that name in this file) -- presumably lost in a
    # mechanical rename; confirm against the upstream script before running.
    lowercase_ = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
    )
    parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
    parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    lowercase_ = parser.parse_args()
    main(args)
| 669 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
    """Processor pairing a CLIP image processor with an XLM-Roberta tokenizer
    (AltCLIP-style): text goes to the tokenizer, images to the image
    processor, and joint calls attach ``pixel_values`` to the text encoding.

    Fixes: the three class attributes were collapsed onto one name, the
    methods shared duplicate parameter names (a SyntaxError), and the
    tokenizer/image-processor results were bound to throwaway locals while
    the body read ``encoding``/``image_features`` (NameError); the canonical
    processor names are restored.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required.

        Returns the text encoding (with ``pixel_values`` attached when images
        are also given) or a ``BatchEncoding`` of the image features alone.
        """
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Tokenizer inputs first, then image-processor inputs, deduplicated
        # while preserving order.
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 669 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( __UpperCamelCase ):
    """Unit tests for ``CMStochasticIterativeScheduler`` (consistency models).

    NOTE(review): throughout this class, results are bound to throwaway
    ``__lowerCamelCase`` names while the following lines read the intended
    names (``config``, ``scheduler``, ``sample``, ``model``, ...), and the
    two distinct class attributes below share one name -- presumably lost in
    a mechanical rename; confirm against the upstream test file.
    """

    __snake_case = (CMStochasticIterativeScheduler,)
    __snake_case = 10  # default number of inference steps

    def _snake_case ( self: Any , **a: Dict ):
        # Base scheduler config; **a overrides individual fields.
        __lowerCamelCase : Optional[Any] = {
            'num_train_timesteps': 201,
            'sigma_min': 0.0_0_2,
            'sigma_max': 8_0.0,
        }

        config.update(**a )
        return config

    def _snake_case ( self: List[Any] ):
        # Two consecutive scheduler steps preserve the sample shape.
        __lowerCamelCase : Any = 10
        __lowerCamelCase : Any = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = self.scheduler_classes[0](**a )

        scheduler.set_timesteps(a )

        __lowerCamelCase : Any = scheduler.timesteps[0]
        __lowerCamelCase : List[str] = scheduler.timesteps[1]

        __lowerCamelCase : Union[str, Any] = self.dummy_sample
        __lowerCamelCase : int = 0.1 * sample

        __lowerCamelCase : Optional[Any] = scheduler.step(a , a , a ).prev_sample
        __lowerCamelCase : List[str] = scheduler.step(a , a , a ).prev_sample

        self.assertEqual(output_a.shape , sample.shape )
        self.assertEqual(output_a.shape , output_a.shape )

    def _snake_case ( self: Optional[Any] ):
        # Common config checks over several training lengths.
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=a )

    def _snake_case ( self: List[str] ):
        # Common config checks for both clip_denoised settings.
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=a )

    def _snake_case ( self: Tuple ):
        # Full denoising loop with the default schedule; pins expected
        # output statistics.
        __lowerCamelCase : Tuple = self.scheduler_classes[0]
        __lowerCamelCase : Tuple = self.get_scheduler_config()
        __lowerCamelCase : Tuple = scheduler_class(**a )

        __lowerCamelCase : int = 1
        scheduler.set_timesteps(a )

        __lowerCamelCase : Optional[int] = scheduler.timesteps

        __lowerCamelCase : List[str] = torch.manual_seed(0 )

        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(a ):
            # 1. scale model input
            __lowerCamelCase : List[str] = scheduler.scale_model_input(a , a )

            # 2. predict noise residual
            __lowerCamelCase : Optional[int] = model(a , a )

            # 3. predict previous sample x_t-1
            __lowerCamelCase : str = scheduler.step(a , a , a , generator=a ).prev_sample

            __lowerCamelCase : str = pred_prev_sample

        __lowerCamelCase : List[str] = torch.sum(torch.abs(a ) )
        __lowerCamelCase : str = torch.mean(torch.abs(a ) )

        assert abs(result_sum.item() - 1_9_2.7_6_1_4 ) < 1e-2
        assert abs(result_mean.item() - 0.2_5_1_0 ) < 1e-3

    def _snake_case ( self: Optional[Any] ):
        # Full loop with an explicit custom timestep schedule.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Optional[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**a )

        __lowerCamelCase : List[Any] = [106, 0]
        scheduler.set_timesteps(timesteps=a )

        __lowerCamelCase : Dict = scheduler.timesteps

        __lowerCamelCase : int = torch.manual_seed(0 )

        __lowerCamelCase : Any = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            __lowerCamelCase : Tuple = scheduler.scale_model_input(a , a )

            # 2. predict noise residual
            __lowerCamelCase : Tuple = model(a , a )

            # 3. predict previous sample x_t-1
            __lowerCamelCase : Any = scheduler.step(a , a , a , generator=a ).prev_sample

            __lowerCamelCase : Any = pred_prev_sample

        __lowerCamelCase : Dict = torch.sum(torch.abs(a ) )
        __lowerCamelCase : Optional[Any] = torch.mean(torch.abs(a ) )

        assert abs(result_sum.item() - 3_4_7.6_3_5_7 ) < 1e-2
        assert abs(result_mean.item() - 0.4_5_2_7 ) < 1e-3

    def _snake_case ( self: Tuple ):
        # Non-descending custom timesteps must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : int = self.get_scheduler_config()
        __lowerCamelCase : List[Any] = scheduler_class(**a )

        __lowerCamelCase : Optional[Any] = [39, 30, 12, 15, 0]

        with self.assertRaises(a , msg='`timesteps` must be in descending order.' ):
            scheduler.set_timesteps(timesteps=a )

    def _snake_case ( self: int ):
        # Passing both num_inference_steps and timesteps must be rejected.
        __lowerCamelCase : Any = self.scheduler_classes[0]
        __lowerCamelCase : Union[str, Any] = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )

        __lowerCamelCase : Optional[int] = [39, 30, 12, 1, 0]
        __lowerCamelCase : List[Any] = len(a )

        with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
            scheduler.set_timesteps(num_inference_steps=a , timesteps=a )

    def _snake_case ( self: Optional[Any] ):
        # Timesteps at/after num_train_timesteps must be rejected.
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Dict = self.get_scheduler_config()
        __lowerCamelCase : Union[str, Any] = scheduler_class(**a )

        __lowerCamelCase : Optional[int] = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
            scheduler.set_timesteps(timesteps=a )
| 669 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
# Make the diffusers test run fully deterministic so pinned slices reproduce.
enable_full_determinism()
class A_ ( unittest.TestCase ):
    """Fast (CPU-sized) tests for the unconditional ``LDMPipeline``.

    NOTE(review): results throughout are bound to throwaway
    ``__lowerCamelCase`` names while the following lines read the intended
    names (``model``, ``ldm``, ``image``, ...) -- presumably lost in a
    mechanical rename; confirm against the upstream test file.
    """

    @property
    def _snake_case ( self: int ):
        # Tiny UNet; fixed seed for reproducible weights.
        torch.manual_seed(0 )
        __lowerCamelCase : int = UNetaDModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
        return model

    @property
    def _snake_case ( self: str ):
        # Tiny VQ autoencoder.
        torch.manual_seed(0 )
        __lowerCamelCase : Any = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
        return model

    @property
    def _snake_case ( self: Dict ):
        # Tiny CLIP text encoder.
        torch.manual_seed(0 )
        __lowerCamelCase : Tuple = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(a )

    def _snake_case ( self: List[str] ):
        # End-to-end 2-step generation: dict and tuple outputs must match,
        # and the corner slice must equal the pinned expected values.
        __lowerCamelCase : Union[str, Any] = self.dummy_uncond_unet
        __lowerCamelCase : List[str] = DDIMScheduler()
        __lowerCamelCase : str = self.dummy_vq_model

        __lowerCamelCase : Optional[int] = LDMPipeline(unet=a , vqvae=a , scheduler=a )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )

        __lowerCamelCase : Optional[int] = torch.manual_seed(0 )
        __lowerCamelCase : Any = ldm(generator=a , num_inference_steps=2 , output_type='numpy' ).images

        __lowerCamelCase : Tuple = torch.manual_seed(0 )
        __lowerCamelCase : Dict = ldm(generator=a , num_inference_steps=2 , output_type='numpy' , return_dict=a )[0]

        __lowerCamelCase : Union[str, Any] = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        __lowerCamelCase : Optional[int] = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
        __lowerCamelCase : str = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class A_ ( unittest.TestCase ):
    """Slow integration test for ``LDMPipeline`` against the
    ``CompVis/ldm-celebahq-256`` checkpoint.

    NOTE(review): results are bound to throwaway names while following lines
    read ``ldm``, ``image``, ``expected_slice``, ``tolerance`` -- presumably
    lost in a mechanical rename; confirm against the upstream test file.
    """

    def _snake_case ( self: Optional[int] ):
        __lowerCamelCase : int = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
        ldm.to(a )
        ldm.set_progress_bar_config(disable=a )

        __lowerCamelCase : Dict = torch.manual_seed(0 )
        __lowerCamelCase : int = ldm(generator=a , num_inference_steps=5 , output_type='numpy' ).images

        __lowerCamelCase : List[str] = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        __lowerCamelCase : List[Any] = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
        __lowerCamelCase : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 669 | 1 |
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A_ ( __UpperCamelCase ):
    """Donut-style processor pairing an auto image processor with an auto
    tokenizer, with a target-processor context manager and a helper that
    converts generated token sequences back into (nested) JSON.

    NOTE(review): results throughout this class are bound to throwaway
    ``__lowerCamelCase`` names while following lines read the intended names
    (``feature_extractor``, ``inputs``, ``encodings``, ``key``, ...), the
    class attributes share one name, and several methods use the duplicate
    parameter name ``a`` (a SyntaxError) -- presumably lost in a mechanical
    rename; confirm against the upstream Donut processor.
    """

    __snake_case = ["""image_processor""", """tokenizer"""]
    __snake_case = """AutoImageProcessor"""
    __snake_case = """AutoTokenizer"""

    def __init__( self: List[str] , a: Optional[Any]=None , a: List[Any]=None , **a: Dict ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        __lowerCamelCase : Dict = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , a , )
            __lowerCamelCase : Optional[int] = kwargs.pop('feature_extractor' )

        __lowerCamelCase : Tuple = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(a , a )
        # track which sub-processor handles plain __call__ invocations
        __lowerCamelCase : Tuple = self.image_processor
        __lowerCamelCase : Union[str, Any] = False

    def __call__( self: List[str] , *a: Dict , **a: Tuple ):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*a , **a )

        __lowerCamelCase : int = kwargs.pop('images' , a )
        __lowerCamelCase : int = kwargs.pop('text' , a )
        if len(a ) > 0:
            # positional form: first arg is images, the rest is forwarded
            __lowerCamelCase : Tuple = args[0]
            __lowerCamelCase : Optional[int] = args[1:]

        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )

        if images is not None:
            __lowerCamelCase : int = self.image_processor(a , *a , **a )
        if text is not None:
            __lowerCamelCase : int = self.tokenizer(a , **a )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # attach the tokenized labels to the image inputs
            __lowerCamelCase : Tuple = encodings['input_ids']
            return inputs

    def _snake_case ( self: Optional[int] , *a: Optional[Any] , **a: Any ):
        # Forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*a , **a )

    def _snake_case ( self: Union[str, Any] , *a: Any , **a: str ):
        # Forwarded to the tokenizer.
        return self.tokenizer.decode(*a , **a )

    @contextmanager
    def _snake_case ( self: int ):
        # Deprecated: temporarily route __call__ through the tokenizer for
        # label processing.
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your images inputs, or in a separate call.' )
        __lowerCamelCase : Optional[int] = True
        __lowerCamelCase : Optional[int] = self.tokenizer
        yield
        __lowerCamelCase : Any = self.image_processor
        __lowerCamelCase : Dict = False

    def _snake_case ( self: Tuple , a: Union[str, Any] , a: str=False , a: Dict=None ):
        # token2json: parse a generated token string of the form
        # <s_key>...</s_key> (possibly nested, with <sep/> separators) into
        # a dict / list structure.
        if added_vocab is None:
            __lowerCamelCase : Any = self.tokenizer.get_added_vocab()

        __lowerCamelCase : Optional[Any] = {}

        while tokens:
            __lowerCamelCase : Any = re.search(R'<s_(.*?)>' , a , re.IGNORECASE )
            if start_token is None:
                break
            __lowerCamelCase : List[Any] = start_token.group(1 )
            __lowerCamelCase : int = re.search(RF'</s_{key}>' , a , re.IGNORECASE )
            __lowerCamelCase : List[str] = start_token.group()
            if end_token is None:
                # unmatched opener: drop it and continue
                __lowerCamelCase : Dict = tokens.replace(a , '' )
            else:
                __lowerCamelCase : Union[str, Any] = end_token.group()
                __lowerCamelCase : int = re.escape(a )
                __lowerCamelCase : Any = re.escape(a )
                __lowerCamelCase : Optional[Any] = re.search(F'{start_token_escaped}(.*?){end_token_escaped}' , a , re.IGNORECASE )
                if content is not None:
                    __lowerCamelCase : int = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        # recurse into the nested structure
                        __lowerCamelCase : Union[str, Any] = self.tokenajson(a , is_inner_value=a , added_vocab=a )
                        if value:
                            if len(a ) == 1:
                                __lowerCamelCase : str = value[0]
                            __lowerCamelCase : Union[str, Any] = value
                    else:  # leaf nodes
                        __lowerCamelCase : Optional[Any] = []
                        for leaf in content.split(R'<sep/>' ):
                            __lowerCamelCase : List[str] = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                __lowerCamelCase : str = leaf[1:-2]  # for categorical special tokens
                            output[key].append(a )
                        if len(output[key] ) == 1:
                            __lowerCamelCase : List[Any] = output[key][0]

                __lowerCamelCase : Tuple = tokens[tokens.find(a ) + len(a ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=a , added_vocab=a )

        if len(a ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def _snake_case ( self: Optional[Any] ):
        # Deprecated alias for image_processor_class.
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
        return self.image_processor_class

    @property
    def _snake_case ( self: str ):
        # Deprecated alias for image_processor.
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
        return self.image_processor
| 669 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
# Shown when the script is invoked with the wrong number of arguments.
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'

# Seed material: 10 live markers among 100 dead ones, randomly ordered.
# Fix: both constants were bound to the same throwaway name (`lowercase_`),
# so `choice` (shuffled below) and `usage_doc` (read in __main__) were
# undefined; the names the rest of the file reads are restored.
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Return a square size x size grid of dead (False) cells.

    Fix: the grid was bound to a throwaway local while ``return canvas``
    read an undefined name; the binding is restored.
    """
    canvas = [[False for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )]
    return canvas
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    """Randomize the given canvas in place: each cell becomes a random bool.

    Fixes: the random bit was assigned to a throwaway local (the canvas was
    never modified), and the inner loop enumerated the whole canvas rather
    than the current row (only coincidentally correct for square grids).
    """
    canvas = SCREAMING_SNAKE_CASE__
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
    # Advance the Game-of-Life canvas by one generation and return it as a
    # nested list of bools.
    # NOTE(review): results are bound to throwaway names while the body reads
    # `current_canvas`, `next_gen_canvas`, `return_canvas`, and it calls
    # `__judge_point` / `create_canvas`, which are not bound under those names
    # in this file -- presumably lost in a mechanical rename; confirm upstream.
    __lowerCamelCase : str = np.array(SCREAMING_SNAKE_CASE__ )
    __lowerCamelCase : Optional[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
    for r, row in enumerate(SCREAMING_SNAKE_CASE__ ):
        for c, pt in enumerate(SCREAMING_SNAKE_CASE__ ):
            # 3x3 neighbourhood; numpy slicing truncates at the grid borders
            __lowerCamelCase : int = __judge_point(
                SCREAMING_SNAKE_CASE__ , current_canvas[r - 1 : r + 2, c - 1 : c + 2] )

    __lowerCamelCase : Any = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    __lowerCamelCase : list[list[bool]] = current_canvas.tolist()
    return return_canvas
def UpperCamelCase__ ( pt , neighbours ):
    """Apply Conway's Game-of-Life rules to one cell.

    ``pt`` is the focus cell's current state; ``neighbours`` is its 3x3
    neighbourhood (which includes the focus cell itself).  Returns the
    cell's next state.  Fixes: the two parameters shared the duplicate
    name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError) and every result was
    bound to a throwaway local while ``return state`` read an undefined
    name; the bindings are restored.
    """
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for row in neighbours:
        for status in row:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False   # underpopulation
        elif alive == 2 or alive == 3:
            state = True    # survival
        elif alive > 3:
            state = False   # overpopulation
    else:
        if alive == 3:
            state = True    # reproduction

    return state
if __name__ == "__main__":
    # Animated Game-of-Life driver: seed a canvas of the requested size and
    # redraw each generation with matplotlib until interrupted.
    # NOTE(review): several names read below (`usage_doc`, `canvas_size`, `c`,
    # `create_canvas`, `seed`, `run`, `fig`, `ax`, `cmap`) are never bound
    # under those names in this file -- presumably lost in a mechanical
    # rename; confirm against the upstream script before running.
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    lowercase_ = int(sys.argv[1])
    # main working structure of this module.
    lowercase_ = create_canvas(canvas_size)
    seed(c)
    lowercase_ ,lowercase_ = plt.subplots()
    fig.show()
    lowercase_ = ListedColormap(['w', 'k'])
    try:
        while True:
            lowercase_ = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
def UpperCamelCase__ ( n_term ):
    """Return the first *n_term* Harmonic-series terms as strings.

    Example: '3' -> ['1', '1/2', '1/3'].  The empty string yields [].
    Raises ValueError (via int()) for non-integer input.  Fixes: the
    parameter had been renamed while the body read ``n_term``, and the
    result list was bound to a throwaway local while ``series`` was read
    back; the bindings are restored.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # the first term is '1'; subsequent terms are '1/k'
        series.append(f'1/{temp + 1}' if series else '1')
    return series
| 669 |
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class A_ ( __UpperCamelCase ):
    """Decoding vocabularies supported by the MGP-STR decode heads:
    character-, BPE- and WordPiece-level.

    NOTE(review): the three members all share the name ``__snake_case`` --
    presumably CHARACTER / BPE / WORDPIECE upstream; confirm.
    """

    __snake_case = """char"""
    __snake_case = """bpe"""
    __snake_case = """wp"""


# Tuple of all supported decode types.
# NOTE(review): `DecodeType` is presumably the intended name of the enum
# above (here bound to `A_`); confirm before running.
lowercase_ = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class A_ ( __UpperCamelCase ):
    """MGP-STR processor: wraps a ViT image processor and an MGP-STR
    character tokenizer, and decodes the model's three logit streams
    (char / BPE / WordPiece) into text, keeping the highest-confidence
    string per sample.

    NOTE(review): results throughout this class are bound to throwaway
    ``__lowerCamelCase`` names while following lines read the intended names
    (``feature_extractor``, ``inputs``, ``encodings``, ``dec_strs``, ...),
    the class attributes share one name, and several methods use the
    duplicate parameter name ``a`` (a SyntaxError) -- presumably lost in a
    mechanical rename; confirm against the upstream MGP-STR processor.
    """

    __snake_case = ["""image_processor""", """char_tokenizer"""]
    __snake_case = """ViTImageProcessor"""
    __snake_case = """MgpstrTokenizer"""

    def __init__( self: int , a: Dict=None , a: Optional[int]=None , **a: List[str] ):
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`.
        __lowerCamelCase : Optional[int] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , a , )
            __lowerCamelCase : Optional[Any] = kwargs.pop('feature_extractor' )

        __lowerCamelCase : Any = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        # the char tokenizer plus auxiliary GPT-2 (BPE) and BERT (WordPiece)
        # tokenizers for the two other decode heads
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Union[str, Any] = AutoTokenizer.from_pretrained('gpt2' )
        __lowerCamelCase : int = AutoTokenizer.from_pretrained('bert-base-uncased' )

        super().__init__(a , a )

    def __call__( self: Optional[int] , a: Optional[int]=None , a: List[Any]=None , a: int=None , **a: str ):
        # Preprocess images and/or tokenize text; at least one is required.
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )

        if images is not None:
            __lowerCamelCase : Dict = self.image_processor(a , return_tensors=a , **a )
        if text is not None:
            __lowerCamelCase : Dict = self.char_tokenizer(a , return_tensors=a , **a )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # attach the tokenized labels to the image inputs
            __lowerCamelCase : List[str] = encodings['input_ids']
            return inputs

    def _snake_case ( self: List[str] , a: List[Any] ):
        # batch_decode: decode the (char, bpe, wp) logit triple and pick, per
        # sample, the candidate with the highest cumulative confidence.
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = sequences
        __lowerCamelCase : List[str] = char_preds.size(0 )
        __lowerCamelCase , __lowerCamelCase : str = self._decode_helper(a , 'char' )
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self._decode_helper(a , 'bpe' )
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self._decode_helper(a , 'wp' )

        __lowerCamelCase : Tuple = []
        __lowerCamelCase : List[Any] = []
        for i in range(a ):
            __lowerCamelCase : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
            __lowerCamelCase : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]]
            __lowerCamelCase : Any = scores.index(max(a ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        __lowerCamelCase : List[str] = {}
        __lowerCamelCase : Optional[int] = final_strs
        __lowerCamelCase : Dict = final_scores
        __lowerCamelCase : Dict = char_strs
        __lowerCamelCase : List[Any] = bpe_strs
        __lowerCamelCase : Tuple = wp_strs
        return out

    def _snake_case ( self: int , a: Optional[int] , a: Optional[Any] ):
        # _decode_helper: greedy-decode one logit stream with the decoder,
        # EOS token and sequence offset that match the requested format.
        if format == DecodeType.CHARACTER:
            __lowerCamelCase : Optional[Any] = self.char_decode
            __lowerCamelCase : Union[str, Any] = 1
            __lowerCamelCase : List[str] = '[s]'
        elif format == DecodeType.BPE:
            __lowerCamelCase : Dict = self.bpe_decode
            __lowerCamelCase : List[str] = 2
            __lowerCamelCase : Any = '#'
        elif format == DecodeType.WORDPIECE:
            __lowerCamelCase : List[str] = self.wp_decode
            __lowerCamelCase : int = 102
            __lowerCamelCase : Dict = '[SEP]'
        else:
            raise ValueError(F'Format {format} is not supported.' )

        __lowerCamelCase , __lowerCamelCase : int = [], []
        __lowerCamelCase : Tuple = pred_logits.size(0 )
        __lowerCamelCase : List[Any] = pred_logits.size(1 )
        # greedy argmax, dropping the leading special position
        __lowerCamelCase , __lowerCamelCase : Dict = pred_logits.topk(1 , dim=-1 , largest=a , sorted=a )
        __lowerCamelCase : List[str] = preds_index.view(-1 , a )[:, 1:]
        __lowerCamelCase : Dict = decoder(a )
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = torch.nn.functional.softmax(a , dim=2 ).max(dim=2 )
        __lowerCamelCase : List[str] = preds_max_prob[:, 1:]

        for index in range(a ):
            # truncate at the EOS token and accumulate the confidence as the
            # product of per-step max probabilities
            __lowerCamelCase : str = preds_str[index].find(a )
            __lowerCamelCase : Tuple = preds_str[index][:pred_eos]
            __lowerCamelCase : Any = preds_index[index].cpu().tolist()
            __lowerCamelCase : Any = pred_index.index(a ) if eos_token in pred_index else -1
            __lowerCamelCase : str = preds_max_prob[index][: pred_eos_index + 1]
            __lowerCamelCase : Union[str, Any] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(a )
            conf_scores.append(a )

        return dec_strs, conf_scores

    def _snake_case ( self: Tuple , a: Optional[int] ):
        # char_decode: character tokenizer, whitespace stripped.
        __lowerCamelCase : Dict = [seq.replace(' ' , '' ) for seq in self.char_tokenizer.batch_decode(a )]
        return decode_strs

    def _snake_case ( self: Optional[int] , a: Tuple ):
        # bpe_decode: GPT-2 BPE tokenizer.
        return self.bpe_tokenizer.batch_decode(a )

    def _snake_case ( self: Optional[int] , a: List[Any] ):
        # wp_decode: BERT WordPiece tokenizer, whitespace stripped.
        __lowerCamelCase : int = [seq.replace(' ' , '' ) for seq in self.wp_tokenizer.batch_decode(a )]
        return decode_strs
| 669 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = ["""image_processor""", """tokenizer"""]
__snake_case = """CLIPImageProcessor"""
__snake_case = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self: Union[str, Any] , a: int=None , a: List[str]=None , **a: str ):
__lowerCamelCase : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
__lowerCamelCase : str = kwargs.pop('feature_extractor' )
__lowerCamelCase : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self: Optional[int] , a: List[Any]=None , a: List[str]=None , a: int=None , **a: List[Any] ):
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
__lowerCamelCase : Dict = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
__lowerCamelCase : Tuple = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
__lowerCamelCase : str = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def _snake_case ( self: List[Any] , *a: Optional[Any] , **a: int ):
return self.tokenizer.batch_decode(*a , **a )
def _snake_case ( self: Any , *a: Union[str, Any] , **a: Optional[Any] ):
return self.tokenizer.decode(*a , **a )
@property
def _snake_case ( self: List[str] ):
__lowerCamelCase : Optional[Any] = self.tokenizer.model_input_names
__lowerCamelCase : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 669 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
# Verbose logging while converting slow tokenizers to their fast variants.
logging.set_verbosity_info()

# NOTE(review): the logger and the name -> fast-tokenizer-class map are both
# bound to `lowercase_`; later code reads `logger` and `TOKENIZER_CLASSES`
# under those names -- presumably lost in a mechanical rename; confirm.
lowercase_ = logging.get_logger(__name__)
lowercase_ = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    """Convert slow tokenizer checkpoints into the fast `tokenizer.json` format.

    Args:
        tokenizer_name: a tokenizer class name (without the "Fast" suffix), or
            None to convert every class in ``TOKENIZER_CLASSES``.
        checkpoint_name: a single checkpoint to convert, or None to convert all
            canonical checkpoints of each tokenizer class.
        dump_path: output directory for the generated fast tokenizer files.
        force_download: re-download checkpoints even when cached.

    Raises:
        ValueError: if `tokenizer_name` is not a known convertible class.
    """
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            # If the checkpoint appears in its own vocab-file URL, mirror that layout on disk.
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            # Keep only the fast `tokenizer.json`; remove the slow-format files just written.
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 669 | 1 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")

require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")

# Module-level logger, used throughout `main`.
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        # Either a hub dataset is named, or local train/validation files must exist
        # and share a supported extension.
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    """Fine-tune / evaluate / predict with BART+TAPEX on the TabFact table-entailment task."""
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below)
    #
    # In distributed training, the load_dataset function guarantee that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.

    # Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            # Rows are '\n'-separated, cells are '#'-separated; the first row holds the header.
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs): each spawned process simply runs `main`.
    main()


if __name__ == "__main__":
    main()
| 669 |
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
# Small SentencePiece model used as the test fixture vocabulary.
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the slow and fast Pegasus tokenizers."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)

    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 9_6103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 1_0710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[3_8979, 143, 1_8485, 606, 130, 2_6669, 8_7686, 121, 5_4189, 1129, 111, 2_6669, 8_7686, 121, 9114, 1_4787, 121, 1_3249, 158, 592, 956, 121, 1_4621, 3_1576, 143, 6_2613, 108, 9688, 930, 4_3430, 1_1562, 6_2613, 304, 108, 1_1443, 897, 108, 9314, 1_7415, 6_3399, 108, 1_1443, 7614, 1_8316, 118, 4284, 7148, 1_2430, 143, 1400, 2_5703, 158, 111, 4284, 7148, 1_1772, 143, 2_1297, 1064, 158, 122, 204, 3506, 1754, 1133, 1_4787, 1581, 115, 3_3224, 4482, 111, 1355, 110, 2_9173, 317, 5_0833, 108, 2_0147, 9_4665, 111, 7_7198, 107, 1], [110, 6_2613, 117, 638, 112, 1133, 121, 2_0098, 1355, 7_9050, 1_3872, 135, 1596, 5_3541, 1352, 141, 1_3039, 5542, 124, 302, 518, 111, 268, 2956, 115, 149, 4427, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [139, 1235, 2799, 1_8289, 1_7780, 204, 109, 9474, 1296, 107, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the BigBird-Pegasus variant (offset=0, `[MASK]` mask token)."""

    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs):
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        test_str = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )
        token_ids = self._large_tokenizer(test_str).input_ids
        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 2_5016, 3137, 464, 109, 2_6955, 3137, 1],
        )
| 669 | 1 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_addoption(parser):
    """pytest hook: register the shared transformers command-line options."""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    """pytest hook: emit the extended test reports when --make-reports is set."""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 669 |
def UpperCamelCase__(density, bulk_modulus):
    """Return the speed of sound (m/s) in a fluid.

    Uses the Newton-Laplace relation c = sqrt(K / rho), where K is the
    bulk modulus (Pa) and rho is the density (kg/m^3).

    Raises:
        ValueError: if density or bulk modulus is not strictly positive.
    """
    # Guard clauses: both physical quantities must be positive.
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")

    speed_squared = bulk_modulus / density
    return speed_squared**0.5


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 669 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    """Smoke test: launch the accelerate test script on 8 TPU cores via xla_spawn."""

    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        # Path of the bundled distributed test script inside the installed package.
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        # Directory containing this test file (where xla_spawn.py lives).
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"\n        {self.test_dir}/xla_spawn.py\n        --num_cores 8\n        {self.test_file_path}\n        ".split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 669 |
def UpperCamelCase__ ( n ):
    """Return the number of positive divisors of ``n``.

    Uses trial-division factorization: for n = p1^a1 * ... * pk^ak the divisor
    count is (a1 + 1) * ... * (ak + 1).

    Fix: the mangled original never bound ``n_divisors``, ``i`` or
    ``multiplicity`` (every assignment targeted a placeholder name), so it
    raised NameError; the parameter name ``n`` is recovered from the body.
    """
    n_divisors = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        n_divisors *= multiplicity + 1
        i += 1
    # Whatever remains above sqrt(original n) is a single prime factor.
    if n > 1:
        n_divisors *= 2
    return n_divisors
def UpperCamelCase__ ( limit=500 ):
    """Return the first triangular number with more than ``limit`` divisors.

    Project Euler problem 12; ``limit`` defaults to the problem's 500 (the
    parameter is a backward-compatible generalization — callers invoking with
    no arguments get the original behavior).

    Fix: the mangled original called ``count_divisors``, a name that does not
    exist in this module; the divisor count is now a private local helper so
    the function is self-contained.
    """

    def _count_divisors(n):
        # Divisor count via trial-division factorization.
        n_divisors = 1
        i = 2
        while i * i <= n:
            multiplicity = 0
            while n % i == 0:
                n //= i
                multiplicity += 1
            n_divisors *= multiplicity + 1
            i += 1
        if n > 1:
            n_divisors *= 2
        return n_divisors

    i = 1
    t_num = 1  # i-th triangular number: 1 + 2 + ... + i
    while True:
        i += 1
        t_num += i
        if _count_divisors(t_num) > limit:
            break
    return t_num
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module — the function above
    # was renamed by the obfuscation to `UpperCamelCase__`; running this script
    # as-is raises NameError. TODO confirm intended entry-point name.
    print(solution())
| 669 | 1 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('T')  # element type cached; was bound to a mangled name before


class A_(Generic[T]):
    """Least-recently-used cache over hashable keys.

    A deque keeps usage order (most recent at the left); a set mirrors it for
    O(1) membership tests. Restores the mangled original, which never assigned
    any instance attribute and gave every method the same placeholder name, so
    the module's own demo (`refer` / `display`) could not run.
    """

    dq_store: deque  # cache store of keys, MRU at the left
    key_reference: set  # mirrors dq_store for O(1) membership tests
    _MAX_CAPACITY: int = 10  # default maximum capacity of the cache

    def __init__(self, n: int):
        """Create a cache holding at most ``n`` keys (unbounded when ``n`` is falsy)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T):
        """Record a use of key ``x``, evicting the least-recently-used key when full."""
        if x not in self.key_reference:
            if len(self.dq_store) == self._MAX_CAPACITY:
                # Evict from the right (least recently used) end.
                evicted = self.dq_store.pop()
                self.key_reference.remove(evicted)
        else:
            # Key already cached: move it to the front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self):
        """Print cached keys, most recently used first."""
        for key in self.dq_store:
            print(key)

    def __repr__(self):
        return F'LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}'
if __name__ == "__main__":
    # Run the module doctests first.
    import doctest

    doctest.testmod()

# Demo of the LRU cache (executes on import as written).
# NOTE(review): `LRUCache` and `lru_cache` are not defined in this module —
# the class above is named `A_` and the instance is bound to `lowercase_`
# (obfuscation residue), so this demo raises NameError as-is. TODO confirm.
lowercase_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 669 |
import numpy as np
class A_:
    """A single grid cell used by the A* search below.

    Restores the mangled original, whose __init__ assigned every field to a
    placeholder name; the field names (`position`, `parent`, `g`, `h`, `f`)
    are recovered from how the sibling `astar` routine reads them.
    """

    def __init__(self):
        self.position = (0, 0)  # (x, y) coordinates in the grid
        self.parent = None  # predecessor cell on the best known path
        self.g = 0  # cost accumulated from the start cell
        self.h = 0  # heuristic estimate of remaining cost to the goal
        self.f = 0  # priority used by the search: g + h

    def __eq__(self, cell):
        # Cells are considered equal when they occupy the same grid position.
        return self.position == cell.position

    def _snake_case(self):
        """Print this cell's position (debug helper)."""
        print(self.position )
class A_ :
    """2-D grid world for A* search: a numpy weight matrix plus 8-connected
    neighbour expansion within the world bounds."""

    def __init__( self: str , a: List[str]=(5, 5) ):
        # NOTE(review): assignments target a mangled placeholder while the code
        # below reads `self.w`, `self.world_x_limit`, `self.world_y_limit` and
        # `world_size` — this constructor is corrupted as written. TODO confirm.
        __lowerCamelCase : Optional[Any] = np.zeros(a )
        __lowerCamelCase : List[str] = world_size[0]
        __lowerCamelCase : Optional[int] = world_size[1]

    def _snake_case ( self: List[Any] ):
        # Print the raw weight matrix (debug helper).
        print(self.w )

    def _snake_case ( self: Optional[int] , a: str ):
        # Expand the 8 Moore neighbours of `cell` that lie inside the world.
        # NOTE(review): locals (`neughbour_cord`, `current_x/y`, `x`, `y`,
        # `neighbours`) and the `Cell()` constructor name are unbound here —
        # mangled; the intent is recovered from the later reads only.
        __lowerCamelCase : Tuple = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        __lowerCamelCase : Optional[int] = cell.position[0]
        __lowerCamelCase : List[str] = cell.position[1]
        __lowerCamelCase : Dict = []
        for n in neughbour_cord:
            __lowerCamelCase : Dict = current_x + n[0]
            __lowerCamelCase : Optional[Any] = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                __lowerCamelCase : Optional[Any] = Cell()
                __lowerCamelCase : Any = (x, y)
                __lowerCamelCase : Dict = cell
                neighbours.append(a )
        return neighbours
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
    # A* search over the grid world: pop the open cell with minimal f, expand
    # neighbours, then walk parent links back from the goal to build the path.
    # NOTE(review): this body is corrupted by obfuscation — duplicate parameter
    # names are a SyntaxError, most locals (`_open`, `_closed`, `min_f`,
    # `current`, `path`) are read but never bound, and the heuristic line
    # computes (ya - ya)**2 + (xa - xa)**2, i.e. always 0. TODO reconstruct
    # from the upstream implementation before relying on it.
    __lowerCamelCase : str = []
    __lowerCamelCase : int = []
    _open.append(SCREAMING_SNAKE_CASE__ )
    while _open:
        # Select the open cell with the smallest f score.
        __lowerCamelCase : Union[str, Any] = np.argmin([n.f for n in _open] )
        __lowerCamelCase : int = _open[min_f]
        _closed.append(_open.pop(SCREAMING_SNAKE_CASE__ ) )
        if current == goal:
            break
        for n in world.get_neigbours(SCREAMING_SNAKE_CASE__ ):
            for c in _closed:
                if c == n:
                    continue
            __lowerCamelCase : Optional[int] = current.g + 1
            __lowerCamelCase , __lowerCamelCase : int = n.position
            __lowerCamelCase , __lowerCamelCase : Tuple = goal.position
            # Squared-Euclidean heuristic (see corruption note above).
            __lowerCamelCase : Dict = (ya - ya) ** 2 + (xa - xa) ** 2
            __lowerCamelCase : str = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(SCREAMING_SNAKE_CASE__ )
    # Reconstruct the path by following parent links back to the start.
    __lowerCamelCase : Optional[int] = []
    while current.parent is not None:
        path.append(current.position )
        __lowerCamelCase : int = current.parent
    path.append(current.position )
    return path[::-1]
if __name__ == "__main__":
    # Demo: run A* from (0, 0) to (4, 4) on a 5x5 grid and mark the path.
    # NOTE(review): `Gridworld`, `Cell`, `astar`, `start`, `goal`, `world` and
    # `s` are all unbound here — the classes above are named `A_` and every
    # assignment targets `lowercase_` (obfuscation residue). TODO confirm the
    # intended names before running.
    lowercase_ = Gridworld()
    # Start position and goal
    lowercase_ = Cell()
    lowercase_ = (0, 0)
    lowercase_ = Cell()
    lowercase_ = (4, 4)
    print(F"""path from {start.position} to {goal.position}""")
    lowercase_ = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        lowercase_ = 1
    print(world.w)
| 669 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
lowercase_ = R'\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `" / "`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `" // "`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `"wiki_dpr"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `"train"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `"compressed"`)\n The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and\n `"compressed"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. 
Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a "dummy" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n'
@add_start_docstrings(__UpperCamelCase )
class A_ ( __UpperCamelCase ):
    """RAG (retrieval-augmented generation) composite configuration: wraps a
    question-encoder config and a generator config plus retrieval settings.

    NOTE(review): obfuscation residue — the decorator argument and base class
    are the undefined name `__UpperCamelCase`, the two class attributes below
    share one mangled name, and many __init__ locals (`question_encoder_config`,
    `decoder_config`) are read without being bound. Code left byte-identical.
    """
    __snake_case = """rag"""
    __snake_case = True

    def __init__( self: Dict , a: List[str]=None , a: Any=True , a: int=None , a: Dict=None , a: Union[str, Any]=None , a: Dict=None , a: Tuple=None , a: List[Any]=" / " , a: Dict=" // " , a: List[str]=5 , a: Optional[int]=300 , a: str=768 , a: Tuple=8 , a: List[Any]="wiki_dpr" , a: Any="train" , a: Optional[Any]="compressed" , a: Optional[int]=None , a: str=None , a: Tuple=False , a: Optional[int]=False , a: str=0.0 , a: List[Any]=True , a: Optional[int]=False , a: List[Any]=False , a: List[str]=False , a: Optional[int]=True , a: Tuple=None , **a: List[str] , ):
        super().__init__(
            bos_token_id=a , pad_token_id=a , eos_token_id=a , decoder_start_token_id=a , forced_eos_token_id=a , is_encoder_decoder=a , prefix=a , vocab_size=a , **a , )
        # The two sub-configs must be supplied via kwargs.
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        __lowerCamelCase : str = kwargs.pop('question_encoder' )
        __lowerCamelCase : Optional[int] = question_encoder_config.pop('model_type' )
        __lowerCamelCase : str = kwargs.pop('generator' )
        __lowerCamelCase : Any = decoder_config.pop('model_type' )
        # Deferred import to avoid a circular dependency with the auto mapping.
        from ..auto.configuration_auto import AutoConfig
        __lowerCamelCase : Tuple = AutoConfig.for_model(a , **a )
        __lowerCamelCase : Optional[int] = AutoConfig.for_model(a , **a )
        # Loss / generation behaviour flags.
        __lowerCamelCase : List[str] = reduce_loss
        __lowerCamelCase : Tuple = label_smoothing
        __lowerCamelCase : Optional[int] = exclude_bos_score
        __lowerCamelCase : Dict = do_marginalize
        # Retrieval formatting and index settings.
        __lowerCamelCase : Dict = title_sep
        __lowerCamelCase : List[Any] = doc_sep
        __lowerCamelCase : Optional[Any] = n_docs
        __lowerCamelCase : Optional[Any] = max_combined_length
        __lowerCamelCase : List[str] = dataset
        __lowerCamelCase : Union[str, Any] = dataset_split
        __lowerCamelCase : Union[str, Any] = index_name
        __lowerCamelCase : int = retrieval_vector_size
        __lowerCamelCase : Optional[Any] = retrieval_batch_size
        __lowerCamelCase : Tuple = passages_path
        __lowerCamelCase : int = index_path
        __lowerCamelCase : Optional[int] = use_dummy_dataset
        __lowerCamelCase : Optional[int] = output_retrieved
        __lowerCamelCase : Union[str, Any] = do_deduplication
        __lowerCamelCase : Optional[int] = use_cache
        # Inherit forced EOS from the generator when not set explicitly.
        if self.forced_eos_token_id is None:
            __lowerCamelCase : Any = getattr(self.generator , 'forced_eos_token_id' , a )

    @classmethod
    def _snake_case ( cls: Dict , a: PretrainedConfig , a: PretrainedConfig , **a: Union[str, Any] ):
        # Alternate constructor from the two sub-configurations.
        return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **a )

    def _snake_case ( self: Tuple ):
        # Serialize to a plain dict, expanding the nested sub-configs.
        __lowerCamelCase : List[Any] = copy.deepcopy(self.__dict__ )
        __lowerCamelCase : Optional[int] = self.question_encoder.to_dict()
        __lowerCamelCase : Optional[int] = self.generator.to_dict()
        __lowerCamelCase : str = self.__class__.model_type
        return output
| 669 |
import math
from datetime import datetime, timedelta
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : Tuple = year % 19
__lowerCamelCase : int = year % 4
__lowerCamelCase : Any = year % 7
__lowerCamelCase : Dict = math.floor(year / 100 )
__lowerCamelCase : str = math.floor((13 + 8 * leap_day_inhibits) / 25 )
__lowerCamelCase : Optional[int] = leap_day_inhibits / 4
__lowerCamelCase : str = (
15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30
__lowerCamelCase : Optional[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
# days to be added to March 21
__lowerCamelCase : Optional[int] = (19 * metonic_cycle + secular_moon_shift) % 30
# PHM -> Paschal Full Moon
__lowerCamelCase : Tuple = (
2 * julian_leap_year
+ 4 * non_leap_year
+ 6 * days_to_add
+ century_starting_point
) % 7
if days_to_add == 29 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 19 )
elif days_to_add == 28 and days_from_phm_to_sunday == 6:
return datetime(SCREAMING_SNAKE_CASE__ , 4 , 18 )
else:
return datetime(SCREAMING_SNAKE_CASE__ , 3 , 22 ) + timedelta(
days=int(days_to_add + days_from_phm_to_sunday ) )
if __name__ == "__main__":
    # Print Easter dates for a handful of sample years.
    # NOTE(review): `tense` and `gauss_easter` are unbound — the loop assigns to
    # `lowercase_` and the function above is named `UpperCamelCase__`
    # (obfuscation residue); this script raises NameError as-is. TODO confirm.
    for year in (1_9_9_4, 2_0_0_0, 2_0_1_0, 2_0_2_1, 2_0_2_3):
        lowercase_ = 'will be' if year > datetime.now().year else 'was'
        print(F"""Easter in {year} {tense} {gauss_easter(year)}""")
| 669 | 1 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def UpperCamelCase__ ( func ):
    """Decorator that makes ``func`` return its wall-clock duration in seconds.

    The wrapped function's return value is discarded; only the elapsed time
    (via ``timeit.default_timer``) is returned. The wrapper keeps the original
    ``__name__`` so timed benchmarks report under the right label.

    Fix: the mangled original never bound ``starttime``/``delta`` and assigned
    ``func.__name__`` to a dead placeholder instead of onto the wrapper.
    """

    def wrapper(*args , **kwargs ):
        starttime = timeit.default_timer()
        func(*args , **kwargs )
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=None ):
    # Generate `num_examples` rows of random dummy data matching a datasets
    # `Features` schema (arrays, scalar values, nested sequences).
    # NOTE(review): corrupted by obfuscation — the duplicated parameter names
    # are a SyntaxError and locals (`dummy_data`, `example`, `v`, `data`) are
    # read without being bound. Code left byte-identical.
    __lowerCamelCase : Any = []
    __lowerCamelCase : List[Any] = seq_shapes or {}
    for i in range(SCREAMING_SNAKE_CASE__ ):
        __lowerCamelCase : Optional[int] = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(SCREAMING_SNAKE_CASE__ , _ArrayXD ):
                # Fixed-shape array feature: random floats in the declared dtype.
                __lowerCamelCase : str = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Value ):
                if v.dtype == "string":
                    __lowerCamelCase : List[str] = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    __lowerCamelCase : Dict = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
            elif isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ):
                # Unwrap nested Sequence features down to the leaf feature.
                while isinstance(SCREAMING_SNAKE_CASE__ , datasets.Sequence ):
                    __lowerCamelCase : Tuple = v.feature
                __lowerCamelCase : Union[str, Any] = seq_shapes[k]
                __lowerCamelCase : Union[str, Any] = np.random.rand(*SCREAMING_SNAKE_CASE__ ).astype(v.dtype )
            __lowerCamelCase : Optional[Any] = data
        dummy_data.append((i, example) )
    return dummy_data
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=100 , SCREAMING_SNAKE_CASE__=None ):
    # Write generated dummy examples to an Arrow file and load them back as a
    # `datasets.Dataset`, sanity-checking the written row count.
    # NOTE(review): corrupted by obfuscation — duplicated parameter names are a
    # SyntaxError and locals (`dummy_data`, `num_final_examples`) are read
    # without being bound. Code left byte-identical.
    __lowerCamelCase : Union[str, Any] = generate_examples(SCREAMING_SNAKE_CASE__ , num_examples=SCREAMING_SNAKE_CASE__ , seq_shapes=SCREAMING_SNAKE_CASE__ )
    with ArrowWriter(features=SCREAMING_SNAKE_CASE__ , path=SCREAMING_SNAKE_CASE__ ) as writer:
        for key, record in dummy_data:
            # Encode each example against the schema before writing.
            __lowerCamelCase : str = features.encode_example(SCREAMING_SNAKE_CASE__ )
            writer.write(SCREAMING_SNAKE_CASE__ )
        __lowerCamelCase , __lowerCamelCase : Tuple = writer.finalize()
    if not num_final_examples == num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    __lowerCamelCase : Optional[Any] = datasets.Dataset.from_file(filename=SCREAMING_SNAKE_CASE__ , info=datasets.DatasetInfo(features=SCREAMING_SNAKE_CASE__ ) )
    return dataset
| 669 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class A_ ( __UpperCamelCase , __UpperCamelCase ):
    """Variance-exploding SDE scheduler (score_sde_pytorch style): one
    Euler-Maruyama reverse-diffusion step per `step` call.

    NOTE(review): obfuscation residue — both base classes are the undefined
    name `__UpperCamelCase`, __init__ repeats the parameter name `a` (a
    SyntaxError), and most locals (`log_mean_coeff`, `std`, `score`, `drift`,
    `diffusion`, `x_mean`) are read without being bound. Code left
    byte-identical.
    """
    __snake_case = 1

    @register_to_config
    def __init__( self: str , a: str=2000 , a: List[str]=0.1 , a: Any=20 , a: Dict=1e-3 ):
        # Timestep schedule is created lazily by the set_timesteps method below.
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Any = None
        __lowerCamelCase : Optional[int] = None

    def _snake_case ( self: int , a: str , a: Union[str, torch.device] = None ):
        # Build a descending timestep grid from 1 down to sampling_eps.
        __lowerCamelCase : int = torch.linspace(1 , self.config.sampling_eps , a , device=a )

    def _snake_case ( self: List[Any] , a: Union[str, Any] , a: Tuple , a: Optional[Any] , a: Dict=None ):
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler' )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        __lowerCamelCase : Tuple = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        __lowerCamelCase : Optional[int] = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        __lowerCamelCase : Optional[Any] = std.flatten()
        # Broadcast std to the sample's rank before dividing.
        while len(std.shape ) < len(score.shape ):
            __lowerCamelCase : List[str] = std.unsqueeze(-1 )
        __lowerCamelCase : Any = -score / std
        # compute
        __lowerCamelCase : List[Any] = -1.0 / len(self.timesteps )
        __lowerCamelCase : Any = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        __lowerCamelCase : Dict = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            __lowerCamelCase : int = beta_t.unsqueeze(-1 )
        __lowerCamelCase : Any = -0.5 * beta_t * x
        __lowerCamelCase : List[Any] = torch.sqrt(a )
        __lowerCamelCase : Tuple = drift - diffusion**2 * score
        __lowerCamelCase : str = x + drift * dt
        # add noise
        __lowerCamelCase : Any = randn_tensor(x.shape , layout=x.layout , generator=a , device=x.device , dtype=x.dtype )
        __lowerCamelCase : Any = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean

    def __len__( self: Optional[int] ):
        return self.config.num_train_timesteps
| 669 | 1 |
from ..utils import DummyObject, requires_backends
class A_ ( metaclass=__UpperCamelCase ):
    """Placeholder object raising an informative ImportError whenever the
    optional `note_seq` backend is unavailable; every access funnels through
    `requires_backends`.

    NOTE(review): the metaclass name `__UpperCamelCase` is undefined here
    (obfuscation residue; upstream uses `DummyObject`). TODO confirm.
    """
    # Backends this placeholder stands in for.
    __snake_case = ["""note_seq"""]

    def __init__( self: List[Any] , *a: Optional[Any] , **a: str ):
        requires_backends(self , ['note_seq'] )

    @classmethod
    def _snake_case ( cls: Optional[Any] , *a: Dict , **a: str ):
        requires_backends(cls , ['note_seq'] )

    @classmethod
    def _snake_case ( cls: Dict , *a: Optional[Any] , **a: List[str] ):
        requires_backends(cls , ['note_seq'] )
| 669 |
def UpperCamelCase__ ( n_element ):
    """Return the first ``n_element`` Hamming numbers (numbers of the form
    2^i * 3^j * 5^k), in ascending order starting from 1.

    Uses the classic three-pointer merge: each pointer tracks the smallest
    element whose multiple by 2, 3 or 5 respectively exceeds the current tail.

    Fix: the mangled original never bound ``n_element``, ``hamming_list``,
    ``i``/``j``/``k`` or ``index`` (assignments all targeted a placeholder),
    so it raised NameError; names are recovered from the later reads.

    Raises:
        ValueError: if ``n_element`` is smaller than 1.
    """
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError('a should be a positive number' )
        raise my_error

    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        # Advance each pointer past candidates already in the list.
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Interactive demo: prompt for n and print the first n Hamming numbers.
    # NOTE(review): `hamming`, `n` and `hamming_numbers` are unbound — the input
    # and result are assigned to `lowercase_` and the function above is named
    # `UpperCamelCase__` (obfuscation residue); raises NameError as-is.
    lowercase_ = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    lowercase_ = hamming(int(n))
    print('-----------------------------------------------------')
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print('-----------------------------------------------------')
| 669 | 1 |
import tempfile
import unittest
import numpy as np
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax
if is_flax_available():
import os
from flax.core.frozen_dict import unfreeze
from flax.traverse_util import flatten_dict
from transformers import FlaxBertModel
lowercase_ = '0.12' # assumed parallelism: 8
@require_flax
@is_staging_test
class A_ ( unittest.TestCase ):
    """Staging-hub round-trip tests: push a tiny FlaxBert to the Hub (user and
    org namespaces), reload it, and verify the parameters survive unchanged.

    NOTE(review): obfuscation residue throughout — locals (`model`,
    `new_model`, `base_params`, `new_params`, `diff`, `tmp_dir`) are read
    without being bound and several calls pass the undefined name `a`. Code
    left byte-identical.
    """

    @classmethod
    def _snake_case ( cls: List[Any] ):
        # Log in with the staging test token for the duration of the class.
        __lowerCamelCase : Optional[Any] = TOKEN
        HfFolder.save_token(a )

    @classmethod
    def _snake_case ( cls: List[Any] ):
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token , repo_id='test-model-flax' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-model-flax-org' )
        except HTTPError:
            pass

    def _snake_case ( self: Tuple ):
        # Push under the user namespace, reload, and compare parameters.
        __lowerCamelCase : Any = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        __lowerCamelCase : List[Any] = FlaxBertModel(a )
        model.push_to_hub('test-model-flax' , use_auth_token=self._token )
        __lowerCamelCase : Dict = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
        __lowerCamelCase : Tuple = flatten_dict(unfreeze(model.params ) )
        __lowerCamelCase : Union[str, Any] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            # Per-tensor absolute difference must be negligible.
            __lowerCamelCase : Dict = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(a , 1e-3 , msg=F'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id='test-model-flax' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(a , repo_id='test-model-flax' , push_to_hub=a , use_auth_token=self._token )
        __lowerCamelCase : Dict = FlaxBertModel.from_pretrained(F'{USER}/test-model-flax' )
        __lowerCamelCase : Any = flatten_dict(unfreeze(model.params ) )
        __lowerCamelCase : Optional[Any] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __lowerCamelCase : Optional[int] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(a , 1e-3 , msg=F'{key} not identical' )

    def _snake_case ( self: Any ):
        # Same round-trip as above, but under an organization namespace.
        __lowerCamelCase : Dict = BertConfig(
            vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 )
        __lowerCamelCase : Optional[int] = FlaxBertModel(a )
        model.push_to_hub('valid_org/test-model-flax-org' , use_auth_token=self._token )
        __lowerCamelCase : List[Any] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
        __lowerCamelCase : str = flatten_dict(unfreeze(model.params ) )
        __lowerCamelCase : List[str] = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __lowerCamelCase : List[str] = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(a , 1e-3 , msg=F'{key} not identical' )
        # Reset repo
        delete_repo(token=self._token , repo_id='valid_org/test-model-flax-org' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(
                a , repo_id='valid_org/test-model-flax-org' , push_to_hub=a , use_auth_token=self._token )
        __lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained('valid_org/test-model-flax-org' )
        __lowerCamelCase : List[str] = flatten_dict(unfreeze(model.params ) )
        __lowerCamelCase : str = flatten_dict(unfreeze(new_model.params ) )
        for key in base_params.keys():
            __lowerCamelCase : str = (base_params[key] - new_params[key]).sum().item()
            self.assertLessEqual(a , 1e-3 , msg=F'{key} not identical' )
def UpperCamelCase__ ( model_a , model_b ):
    """Return True when every parameter tensor of two Flax models matches.

    Flattens both models' nested ``params`` trees and compares tensors
    key-by-key; a summed absolute difference above 1e-4 on any tensor marks
    the models as different (the whole key set is still scanned).

    Fix: the mangled original repeated one parameter name (a SyntaxError),
    flattened the same model twice, and diffed a tensor against itself —
    making the comparison vacuously True.
    """
    models_are_equal = True
    flat_params_a = flatten_dict(model_a.params )
    flat_params_b = flatten_dict(model_b.params )
    for key in flat_params_a.keys():
        if np.sum(np.abs(flat_params_a[key] - flat_params_b[key] ) ) > 1e-4:
            models_are_equal = False
    return models_are_equal
@require_flax
class A_ ( unittest.TestCase ):
    """Tests loading Flax models from a repo/directory *subfolder*, including
    sharded checkpoints; loading without `subfolder=` must fail.

    NOTE(review): obfuscation residue — locals (`model`, `subfolder`,
    `tmp_dir`, `model_loaded`) are read without being bound and several calls
    pass the undefined name `a`. Code left byte-identical.
    """

    def _snake_case ( self: Optional[int] ):
        # Save into a subfolder of a temp dir; plain from_pretrained must
        # raise, from_pretrained(..., subfolder=...) must round-trip.
        __lowerCamelCase : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        __lowerCamelCase : Any = FlaxBertModel(a )
        __lowerCamelCase : List[str] = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(a , a ) )
            with self.assertRaises(a ):
                __lowerCamelCase : int = FlaxBertModel.from_pretrained(a )
            __lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained(a , subfolder=a )
        self.assertTrue(check_models_equal(a , a ) )

    def _snake_case ( self: Any ):
        # Same as above, but force a sharded save with a tiny max_shard_size.
        __lowerCamelCase : List[Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-bert-flax-only' )
        __lowerCamelCase : Any = FlaxBertModel(a )
        __lowerCamelCase : Optional[int] = 'bert'
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(os.path.join(a , a ) , max_shard_size='10KB' )
            with self.assertRaises(a ):
                __lowerCamelCase : Any = FlaxBertModel.from_pretrained(a )
            __lowerCamelCase : int = FlaxBertModel.from_pretrained(a , subfolder=a )
        self.assertTrue(check_models_equal(a , a ) )

    def _snake_case ( self: Dict ):
        # Hub repo with weights in a subfolder: plain load fails, subfolder load works.
        __lowerCamelCase : Tuple = 'bert'
        __lowerCamelCase : List[Any] = 'hf-internal-testing/tiny-random-bert-subfolder'
        with self.assertRaises(a ):
            __lowerCamelCase : Any = FlaxBertModel.from_pretrained(a )
        __lowerCamelCase : Optional[Any] = FlaxBertModel.from_pretrained(a , subfolder=a )
        self.assertIsNotNone(a )

    def _snake_case ( self: Tuple ):
        # Sharded variant of the hub-subfolder scenario.
        __lowerCamelCase : Optional[Any] = 'bert'
        __lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-bert-sharded-subfolder'
        with self.assertRaises(a ):
            __lowerCamelCase : Any = FlaxBertModel.from_pretrained(a )
        __lowerCamelCase : List[Any] = FlaxBertModel.from_pretrained(a , subfolder=a )
        self.assertIsNotNone(a )
| 669 |
import unittest
from knapsack import greedy_knapsack as kp
class A_ ( unittest.TestCase ):
    """Unit tests for the greedy knapsack helper `kp.calc_profit`.

    NOTE(review): obfuscation residue — the fixture lists are assigned to a
    placeholder while `calc_profit` receives the undefined name `a`, and the
    `assertRaisesRegex` calls below lost their callable-and-arguments (they
    pass only `a` plus the expected message). Code left byte-identical.
    """

    def _snake_case ( self: List[Any] ):
        # Nominal case: profit/weight lists with capacity 100 should yield 210.
        __lowerCamelCase : str = [10, 20, 30, 40, 50, 60]
        __lowerCamelCase : List[str] = [2, 4, 6, 8, 10, 12]
        __lowerCamelCase : Tuple = 100
        self.assertEqual(kp.calc_profit(a , a , a ) , 210 )

    def _snake_case ( self: str ):
        # Zero capacity must be rejected.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: List[str] ):
        # Negative weights must be rejected.
        self.assertRaisesRegex(a , 'Weight can not be negative.' )

    def _snake_case ( self: Dict ):
        # Negative profits must be rejected.
        self.assertRaisesRegex(a , 'Profit can not be negative.' )

    def _snake_case ( self: List[str] ):
        # Negative capacity must be rejected.
        self.assertRaisesRegex(a , 'max_weight must greater than zero.' )

    def _snake_case ( self: Any ):
        # Mismatched list lengths must be rejected.
        self.assertRaisesRegex(
            a , 'The length of profit and weight must be same.' )


if __name__ == "__main__":
    unittest.main()
| 669 | 1 |
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class A_ :
    """Abstract base for generation streamers: subclasses receive token batches
    and an end-of-stream signal.

    NOTE(review): both abstract hooks carry the same mangled name
    `_snake_case` (upstream these are `put` and `end`), so the second
    definition shadows the first. Code left byte-identical.
    """

    def _snake_case ( self: Union[str, Any] , a: str ):
        # Called with each new batch of generated token ids.
        raise NotImplementedError()

    def _snake_case ( self: List[str] ):
        # Called once when generation finishes.
        raise NotImplementedError()
class A_ ( __UpperCamelCase ):
    """Streamer that incrementally decodes generated tokens and prints text to
    stdout, flushing on newlines, CJK characters, or complete words.

    NOTE(review): obfuscation residue — the base class `__UpperCamelCase` is
    undefined, __init__ never binds `self.tokenizer` / `self.skip_prompt` /
    `self.token_cache` / `self.print_len` / `self.next_tokens_are_prompt`
    (assignments target a placeholder), and later code reads `value`, `text`
    and passes the undefined name `a`. Code left byte-identical.
    """

    def __init__( self: str , a: "AutoTokenizer" , a: bool = False , **a: List[str] ):
        __lowerCamelCase : Dict = tokenizer
        __lowerCamelCase : List[Any] = skip_prompt
        __lowerCamelCase : List[Any] = decode_kwargs
        # variables used in the streaming process
        __lowerCamelCase : List[Any] = []
        __lowerCamelCase : Optional[int] = 0
        __lowerCamelCase : List[str] = True

    def _snake_case ( self: Union[str, Any] , a: str ):
        # Receive a batch of token ids; only batch size 1 is supported.
        if len(value.shape ) > 1 and value.shape[0] > 1:
            raise ValueError('TextStreamer only supports batch size 1' )
        elif len(value.shape ) > 1:
            __lowerCamelCase : Union[str, Any] = value[0]
        # Optionally swallow the prompt tokens (the first call).
        if self.skip_prompt and self.next_tokens_are_prompt:
            __lowerCamelCase : Optional[Any] = False
            return
        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist() )
        __lowerCamelCase : int = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
        # After the symbol for a new line, we flush the cache.
        if text.endswith('\n' ):
            __lowerCamelCase : List[Any] = text[self.print_len :]
            __lowerCamelCase : Optional[Any] = []
            __lowerCamelCase : Dict = 0
        # If the last token is a CJK character, we print the characters.
        elif len(a ) > 0 and self._is_chinese_char(ord(text[-1] ) ):
            __lowerCamelCase : Tuple = text[self.print_len :]
            self.print_len += len(a )
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            __lowerCamelCase : Any = text[self.print_len : text.rfind(' ' ) + 1]
            self.print_len += len(a )
        self.on_finalized_text(a )

    def _snake_case ( self: List[Any] ):
        # Flush the cache, if it exists
        if len(self.token_cache ) > 0:
            __lowerCamelCase : Any = self.tokenizer.decode(self.token_cache , **self.decode_kwargs )
            __lowerCamelCase : Dict = text[self.print_len :]
            __lowerCamelCase : Dict = []
            __lowerCamelCase : Optional[int] = 0
        else:
            __lowerCamelCase : Optional[Any] = ''
        __lowerCamelCase : int = True
        self.on_finalized_text(a , stream_end=a )

    def _snake_case ( self: str , a: str , a: bool = False ):
        # Default sink: print the finalized text chunk to stdout.
        print(a , flush=a , end='' if not stream_end else None )

    def _snake_case ( self: int , a: List[Any] ):
        # This defines a "chinese character" as anything in the CJK Unicode block:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        #
        # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
        # despite its name. The modern Korean Hangul alphabet is a different block,
        # as is Japanese Hiragana and Katakana. Those alphabets are used to write
        # space-separated words, so they are not treated specially and handled
        # like the all of the other languages.
        if (
            (cp >= 0X4e00 and cp <= 0X9fff)
            or (cp >= 0X3400 and cp <= 0X4dbf) #
            or (cp >= 0X2_0000 and cp <= 0X2_a6df) #
            or (cp >= 0X2_a700 and cp <= 0X2_b73f) #
            or (cp >= 0X2_b740 and cp <= 0X2_b81f) #
            or (cp >= 0X2_b820 and cp <= 0X2_ceaf) #
            or (cp >= 0Xf900 and cp <= 0Xfaff)
            or (cp >= 0X2_f800 and cp <= 0X2_fa1f) #
        ): #
            return True
        return False
class A_ ( __UpperCamelCase ):
    """Iterator-style streamer: finalized text chunks are pushed onto a Queue
    so another thread can consume them with a `for` loop; a stop signal ends
    iteration.

    NOTE(review): obfuscation residue — the base class `__UpperCamelCase` is
    undefined, __init__ repeats the parameter name `a` (a SyntaxError) and
    never binds `self.text_queue` / `self.stop_signal` / `self.timeout`, and
    the last method reads the unbound local `value`. Code left byte-identical.
    """

    def __init__( self: int , a: "AutoTokenizer" , a: bool = False , a: Optional[float] = None , **a: Tuple ):
        super().__init__(a , a , **a )
        __lowerCamelCase : Optional[int] = Queue()
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Union[str, Any] = timeout

    def _snake_case ( self: Optional[Any] , a: str , a: bool = False ):
        # Enqueue a finalized text chunk; append the stop signal at stream end.
        self.text_queue.put(a , timeout=self.timeout )
        if stream_end:
            self.text_queue.put(self.stop_signal , timeout=self.timeout )

    def __iter__( self: int ):
        return self

    def _snake_case ( self: List[str] ):
        # Pop the next chunk, translating the stop signal into StopIteration.
        __lowerCamelCase : Dict = self.text_queue.get(timeout=self.timeout )
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
| 669 |
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class A_ :
    """
    Builds random configs and inputs for the TF LayoutLMv3 model tests
    (text tokens + bounding boxes + pixel values) and runs shape checks on
    each head.

    NOTE(review): this class is machine-mangled — every ``__init__``
    parameter is named ``a`` (a duplicate-argument SyntaxError) and the
    assignments below bind a local ``__lowerCamelCase`` instead of instance
    attributes, even though later methods read ``self.batch_size`` etc.
    Restore parameter/attribute names (they are visible on the right-hand
    sides) before this can run.
    """
    def __init__( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any]=2 , a: str=3 , a: Any=4 , a: Union[str, Any]=2 , a: Tuple=7 , a: int=True , a: Tuple=True , a: List[str]=True , a: Union[str, Any]=True , a: str=99 , a: Tuple=36 , a: int=2 , a: Dict=4 , a: Union[str, Any]=37 , a: List[str]="gelu" , a: List[Any]=0.1 , a: Optional[int]=0.1 , a: Dict=512 , a: Union[str, Any]=16 , a: str=2 , a: int=0.0_2 , a: Optional[Any]=6 , a: Optional[int]=6 , a: Dict=3 , a: Optional[Any]=4 , a: Optional[Any]=None , a: Dict=1000 , ):
        # NOTE(review): each line below was presumably `self.<name> = <name>`
        # before mangling — the RHS names are the intended attributes.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Optional[int] = num_channels
        __lowerCamelCase : str = image_size
        __lowerCamelCase : int = patch_size
        __lowerCamelCase : List[str] = is_training
        __lowerCamelCase : Dict = use_input_mask
        __lowerCamelCase : Any = use_token_type_ids
        __lowerCamelCase : List[str] = use_labels
        __lowerCamelCase : str = vocab_size
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : List[Any] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : List[Any] = intermediate_size
        __lowerCamelCase : List[Any] = hidden_act
        __lowerCamelCase : Any = hidden_dropout_prob
        __lowerCamelCase : Optional[int] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = max_position_embeddings
        __lowerCamelCase : Tuple = type_vocab_size
        __lowerCamelCase : int = type_sequence_label_size
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = coordinate_size
        __lowerCamelCase : int = shape_size
        __lowerCamelCase : Union[str, Any] = num_labels
        __lowerCamelCase : int = num_choices
        __lowerCamelCase : int = scope
        __lowerCamelCase : Any = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        __lowerCamelCase : Any = text_seq_length
        __lowerCamelCase : Optional[Any] = (image_size // patch_size) ** 2 + 1
        __lowerCamelCase : Any = self.text_seq_length + self.image_seq_length
    def _snake_case ( self: List[str] ):
        # Draw random ids/bboxes/pixels; bbox coords are swapped where needed so
        # that (x0, y0) <= (x1, y1) holds for every box.
        __lowerCamelCase : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
        __lowerCamelCase : Tuple = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
        __lowerCamelCase : int = bbox.numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    __lowerCamelCase : List[str] = bbox[i, j, 3]
                    __lowerCamelCase : str = bbox[i, j, 1]
                    __lowerCamelCase : Dict = tmp_coordinate
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    __lowerCamelCase : Tuple = bbox[i, j, 2]
                    __lowerCamelCase : Any = bbox[i, j, 0]
                    __lowerCamelCase : List[str] = tmp_coordinate
        __lowerCamelCase : str = tf.constant(a )
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __lowerCamelCase : Any = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.text_seq_length] )
        __lowerCamelCase : Tuple = None
        if self.use_token_type_ids:
            __lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
        __lowerCamelCase : Dict = None
        __lowerCamelCase : Union[str, Any] = None
        if self.use_labels:
            __lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __lowerCamelCase : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
        __lowerCamelCase : Dict = LayoutLMvaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def _snake_case ( self: Tuple , a: List[Any] , a: Any , a: List[str] , a: Dict , a: Optional[Any] , a: Dict ):
        # Checks the base model in text+image, text-only and image-only modes.
        __lowerCamelCase : Optional[Any] = TFLayoutLMvaModel(config=a )
        # text + image
        __lowerCamelCase : Optional[Any] = model(a , pixel_values=a , training=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , training=a , )
        __lowerCamelCase : List[Any] = model(a , bbox=a , pixel_values=a , training=a )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # text only
        __lowerCamelCase : List[Any] = model(a , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
        # image only
        __lowerCamelCase : Optional[Any] = model({'pixel_values': pixel_values} , training=a )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
    def _snake_case ( self: Dict , a: Dict , a: Optional[Any] , a: int , a: Optional[int] , a: List[str] , a: List[str] , a: List[str] ):
        # Sequence-classification head: logits are (batch, num_labels).
        __lowerCamelCase : List[str] = self.num_labels
        __lowerCamelCase : str = TFLayoutLMvaForSequenceClassification(config=a )
        __lowerCamelCase : int = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def _snake_case ( self: Optional[int] , a: Union[str, Any] , a: Union[str, Any] , a: Dict , a: Optional[Any] , a: Tuple , a: Optional[Any] , a: List[Any] ):
        # Token-classification head: logits only cover the text tokens.
        __lowerCamelCase : Union[str, Any] = self.num_labels
        __lowerCamelCase : Any = TFLayoutLMvaForTokenClassification(config=a )
        __lowerCamelCase : Optional[Any] = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , labels=a , training=a , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
    def _snake_case ( self: Dict , a: Optional[Any] , a: str , a: Dict , a: Union[str, Any] , a: List[Any] , a: Optional[int] , a: List[str] ):
        # QA head: start/end logits span the full (text + image) sequence.
        __lowerCamelCase : List[Any] = 2
        __lowerCamelCase : Any = TFLayoutLMvaForQuestionAnswering(config=a )
        __lowerCamelCase : Any = model(
            a , bbox=a , pixel_values=a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , training=a , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def _snake_case ( self: List[Any] ):
        # Packs prepare_config_and_inputs() into the kwargs dict the common
        # test mixin expects.
        __lowerCamelCase : str = self.prepare_config_and_inputs()
        ((__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase) , (__lowerCamelCase)) : List[Any] = config_and_inputs
        __lowerCamelCase : Tuple = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ):
    """
    Common-mixin test suite for the TF LayoutLMv3 models (base, QA, sequence
    and token classification).

    NOTE(review): machine-mangled — several ``def``s have duplicate ``a``
    parameters (SyntaxErrors), every test method is named ``_snake_case`` (so
    later defs shadow earlier ones and unittest discovers none of them), and
    assignments bind a local ``__lowerCamelCase`` instead of the names the
    following lines read (e.g. ``model``, ``prepared_for_class``). The
    intended upstream file is transformers'
    ``test_modeling_tf_layoutlmv3.py``; restore names from there before use.
    """

    __snake_case = (
        (
            TFLayoutLMvaModel,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    __snake_case = (
        {"""document-question-answering""": TFLayoutLMvaForQuestionAnswering, """feature-extraction""": TFLayoutLMvaModel}
        if is_tf_available()
        else {}
    )
    __snake_case = False
    __snake_case = False
    __snake_case = False
    def _snake_case ( self: int , a: List[str] , a: Any , a: Optional[Any] , a: Tuple , a: Tuple ):
        # Pipeline tests are unconditionally skipped for this model.
        return True
    def _snake_case ( self: str , a: Any , a: Any , a: Optional[int]=False ):
        # Expands inputs for multiple-choice heads and adds dummy labels per
        # head type when return_labels is requested.
        __lowerCamelCase : List[str] = copy.deepcopy(a )
        if model_class in get_values(a ):
            __lowerCamelCase : Tuple = {
                k: tf.tile(tf.expand_dims(a , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
                if isinstance(a , tf.Tensor ) and v.ndim > 0
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(a ):
                __lowerCamelCase : Any = tf.ones(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
                __lowerCamelCase : Optional[Any] = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
            elif model_class in get_values(a ):
                __lowerCamelCase : Dict = tf.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.intaa )
        return inputs_dict
    def _snake_case ( self: Tuple ):
        # setUp: build the model tester and the config tester.
        __lowerCamelCase : int = TFLayoutLMvaModelTester(self )
        __lowerCamelCase : str = ConfigTester(self , config_class=a , hidden_size=37 )
    def _snake_case ( self: Union[str, Any] ):
        self.config_tester.run_common_tests()
    def _snake_case ( self: Union[str, Any] ):
        # Loss computation: verifies loss shape via kwargs, masked labels
        # (-100), a dict input and a positional tuple input.
        __lowerCamelCase , __lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(a )
            if getattr(a , 'hf_compute_loss' , a ):
                # The number of elements in the loss should be the same as the number of elements in the label
                __lowerCamelCase : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : int = prepared_for_class[
                    sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=a )[0]
                ]
                __lowerCamelCase : Dict = added_label.shape.as_list()[:1]
                # Test that model correctly compute the loss with kwargs
                __lowerCamelCase : Optional[int] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : Dict = prepared_for_class.pop('input_ids' )
                __lowerCamelCase : str = model(a , **a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss when we mask some positions
                __lowerCamelCase : List[Any] = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : List[str] = prepared_for_class.pop('input_ids' )
                if "labels" in prepared_for_class:
                    __lowerCamelCase : int = prepared_for_class['labels'].numpy()
                    if len(labels.shape ) > 1 and labels.shape[1] != 1:
                        __lowerCamelCase : Tuple = -100
                        __lowerCamelCase : Tuple = tf.convert_to_tensor(a )
                        __lowerCamelCase : Tuple = model(a , **a )[0]
                        self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                        self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
                # Test that model correctly compute the loss with a dict
                __lowerCamelCase : int = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                __lowerCamelCase : str = model(a )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
                # Test that model correctly compute the loss with a tuple
                __lowerCamelCase : str = self._prepare_for_class(inputs_dict.copy() , a , return_labels=a )
                # Get keys that were added with the _prepare_for_class function
                __lowerCamelCase : Optional[Any] = prepared_for_class.keys() - inputs_dict.keys()
                __lowerCamelCase : List[Any] = inspect.signature(model.call ).parameters
                __lowerCamelCase : List[str] = list(signature.keys() )
                # Create a dictionary holding the location of the tensors in the tuple
                __lowerCamelCase : Optional[int] = {0: 'input_ids'}
                for label_key in label_keys:
                    __lowerCamelCase : Dict = signature_names.index(a )
                    __lowerCamelCase : str = label_key
                __lowerCamelCase : List[str] = sorted(tuple_index_mapping.items() )
                # Initialize a list with their default values, update the values and convert to a tuple
                __lowerCamelCase : Optional[int] = []
                for name in signature_names:
                    if name != "kwargs":
                        list_input.append(signature[name].default )
                for index, value in sorted_tuple_index_mapping:
                    __lowerCamelCase : Optional[int] = prepared_for_class[value]
                __lowerCamelCase : Any = tuple(a )
                # Send to model
                __lowerCamelCase : int = model(tuple_input[:-1] )[0]
                self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
    def _snake_case ( self: List[str] ):
        # NOTE(review): this parenthesized target unpacks eight values into the
        # SAME name; the annotated-tuple-target form is itself a SyntaxError.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: int ):
        # Re-runs the base-model check for each position-embedding type.
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : str = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowerCamelCase : Union[str, Any] = type
            self.model_tester.create_and_check_model(a , a , a , a , a , a )
    def _snake_case ( self: Dict ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(
            a , a , a , a , a , a , a )
    def _snake_case ( self: str ):
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(
            a , a , a , a , a , a , a )
    @slow
    def _snake_case ( self: int ):
        # Smoke-test loading the first published checkpoint.
        for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Dict = TFLayoutLMvaModel.from_pretrained(a )
            self.assertIsNotNone(a )
def prepare_img():
    """Load the standard COCO fixture image used by the LayoutLMv3 integration test.

    NOTE(review): the machine-mangled original defined this helper as
    ``UpperCamelCase__`` while its caller invokes ``prepare_img()`` — a
    NameError at test time.  The canonical name is restored and the mangled
    one kept as an alias for backward compatibility.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')


# Backward-compatible alias for the machine-mangled name.
UpperCamelCase__ = prepare_img
@require_tf
class A_ ( unittest.TestCase ):
    """
    Slow integration test: runs the real ``microsoft/layoutlmv3-base`` TF
    checkpoint on a fixture image and verifies the output shape and a slice
    of the hidden states.

    NOTE(review): restored from a machine-mangled original in which both the
    image-processor property and the test method were named ``_snake_case``
    (so the property lookup ``self.default_image_processor`` failed and
    unittest never discovered the test), and the undefined name ``a`` was
    passed as ``apply_ocr`` and ``training``.
    """

    @cached_property
    def default_image_processor(self):
        # OCR is disabled: the test supplies its own token ids and boxes.
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = TFLayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base')

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='tf').pixel_values

        input_ids = tf.constant([[1, 2]])
        bbox = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]]), axis=0)

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, pixel_values=pixel_values, training=False)

        # verify the hidden states: 2 text tokens + 196 patches + 1 CLS = 199
        expected_shape = (1, 199, 768)
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = tf.constant(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]])
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))
| 669 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical MobileNetV2 checkpoints -> hosted config URLs (upstream name:
# MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP).
# NOTE(review): in the mangled original BOTH the logger and this map were
# bound to `lowercase_`, so the logger was immediately clobbered.  The map
# keeps the old name for backward compatibility; the logger gets its own.
lowercase_ = {
    'google/mobilenet_v2_1.4_224': 'https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json',
    'google/mobilenet_v2_1.0_224': 'https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json',
    'google/mobilenet_v2_0.75_160': 'https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json',
    'google/mobilenet_v2_0.35_96': 'https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json',
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A_ ( __UpperCamelCase ):
    """
    Configuration for a MobileNetV2 model: architecture hyper-parameters
    (input size, depth multiplier, expansion ratio, ...) plus training-time
    knobs (classifier dropout, initializer range, ...).

    NOTE(review): restored from a machine-mangled original in which every
    ``__init__`` parameter was named ``a`` (a duplicate-argument SyntaxError)
    and the assignments bound a throwaway local instead of ``self``.  The
    parameter names below are recovered from the right-hand sides of those
    assignments; defaults are the mangled original's defaults, in order.
    """

    # Architecture identifier read by the Auto* machinery.  The mangled
    # original used ``__snake_case``, which Python name-mangles inside the
    # class body and which nothing ever reads.
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A_ ( __UpperCamelCase ):
    """
    ONNX export configuration for MobileNetV2.

    NOTE(review): restored from a machine-mangled original in which all three
    properties shared the name ``_snake_case`` (so two were shadowed by the
    last definition) and the version attribute used a dunder-prefixed name
    that Python name-mangles.  The ONNX export machinery looks these members
    up by the names below.
    """

    # Minimum torch version whose ONNX exporter supports this model.
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance when validating ONNX outputs against eager ones.
        return 1e-4
| 669 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A_ ( __UpperCamelCase , unittest.TestCase ):
    """
    Test suite for the slow and fast CLIP tokenizers (BPE over a tiny toy
    vocabulary written to a temp dir in setUp).

    NOTE(review): machine-mangled — every test method is named
    ``_snake_case`` (later defs shadow earlier ones and unittest discovers
    none), and assignments bind a local ``__lowerCamelCase`` instead of the
    names the following lines read (e.g. ``kwargs``, ``tokenizer``,
    ``encoding``).  The intended upstream file is transformers'
    ``test_tokenization_clip.py``.
    """

    __snake_case = CLIPTokenizer
    __snake_case = CLIPTokenizerFast
    __snake_case = True
    __snake_case = {}
    __snake_case = False
    def _snake_case ( self: Union[str, Any] ):
        # Writes a toy vocab + merges file that the tokenizers load in tests.
        super().setUp()
        # fmt: off
        __lowerCamelCase : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
        # fmt: on
        __lowerCamelCase : Tuple = dict(zip(a , range(len(a ) ) ) )
        __lowerCamelCase : List[Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>']
        __lowerCamelCase : Tuple = {'unk_token': '<unk>'}
        __lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        __lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(a ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(a ) )
    def _snake_case ( self: Tuple , **a: Union[str, Any] ):
        # Factory for the slow tokenizer over the temp-dir fixture files.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Union[str, Any] , **a: List[str] ):
        # Factory for the fast (Rust) tokenizer over the same fixtures.
        kwargs.update(self.special_tokens_map )
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a )
    def _snake_case ( self: Optional[int] , a: List[Any] ):
        __lowerCamelCase : Tuple = 'lower newer'
        __lowerCamelCase : Tuple = 'lower newer'
        return input_text, output_text
    def _snake_case ( self: List[str] ):
        # Basic BPE sanity check against hand-computed tokens and ids.
        __lowerCamelCase : List[Any] = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        __lowerCamelCase : Optional[Any] = 'lower newer'
        __lowerCamelCase : int = ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']
        __lowerCamelCase : Optional[int] = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        __lowerCamelCase : int = tokens + [tokenizer.unk_token]
        __lowerCamelCase : int = [10, 2, 16, 9, 3, 2, 16, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @require_ftfy
    def _snake_case ( self: Union[str, Any] ):
        # Slow (ftfy) and fast tokenizers must agree on tricky unicode input.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : List[Any] = self.tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : int = self.rust_tokenizer_class.from_pretrained(a , **a )
                __lowerCamelCase : str = 'A\n\'ll 11p223RF☆ho!!to?\'d\'d\'\'d of a cat to-$\'\'d.'
                __lowerCamelCase : Optional[Any] = tokenizer_s.tokenize(a )
                __lowerCamelCase : Optional[Any] = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on an example containing a character (Latin Small Letter A
                # with Tilde) encoded in 2 different ways
                __lowerCamelCase : List[Any] = 'xa\u0303y' + ' ' + 'x\xe3y'
                __lowerCamelCase : Tuple = tokenizer_s.tokenize(a )
                __lowerCamelCase : Any = tokenizer_r.tokenize(a )
                self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of space type
                __lowerCamelCase : List[Any] = [
                    '\u0009', # (horizontal tab, '\t')
                    '\u000B', # (vertical tab)
                    '\u000C', # (form feed)
                    '\u0020', # (space, ' ')
                    '\u200E', # (left-to-right mark):w
                    '\u200F', # (right-to-left mark)
                ]
                for unicode_seq in spaces_unicodes:
                    __lowerCamelCase : List[Any] = tokenizer_s.tokenize(a )
                    __lowerCamelCase : Optional[int] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
                # Test that the tokenization is identical on unicode of line break type
                __lowerCamelCase : str = [
                    '\u000A', # (line feed, '\n')
                    '\r\n', # (carriage return and line feed, '\r\n')
                    '\u000D', # (carriage return, '\r')
                    '\r', # (carriage return, '\r')
                    '\u000D', # (carriage return, '\r')
                    '\u2028', # (line separator)
                    '\u2029', # (paragraph separator)
                    # "\u0085", # (next line)
                ]
                # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
                # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
                # space (and thus into an empty list).
                for unicode_seq in line_break_unicodes:
                    __lowerCamelCase : Dict = tokenizer_s.tokenize(a )
                    __lowerCamelCase : List[str] = tokenizer_r.tokenize(a )
                    self.assertListEqual(a , a )
    def _snake_case ( self: List[Any] ):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
                __lowerCamelCase : Optional[int] = 'hello' # `hello` is a token in the vocabulary of `pretrained_name`
                __lowerCamelCase : Optional[int] = F'{text_of_1_token} {text_of_1_token}'
                __lowerCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , )
                __lowerCamelCase : List[Any] = F' {text}'
                __lowerCamelCase : str = self.rust_tokenizer_class.from_pretrained(
                    a , use_fast=a , )
                __lowerCamelCase : Any = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a )
                self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) )
                self.assertEqual(
                    encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , )
    def _snake_case ( self: str ):
        # Test related to the breaking change introduced in transformers v4.17.0
        # We need to check that an error in raised when the user try to load a previous version of the tokenizer.
        with self.assertRaises(a ) as context:
            self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
        self.assertTrue(
            context.exception.args[0].startswith(
                'The `backend_tokenizer` provided does not match the expected format.' ) )
    @require_ftfy
    def _snake_case ( self: Tuple ):
        super().test_tokenization_python_rust_equals()
    def _snake_case ( self: Tuple ):
        # CLIP always lower cases letters
        pass
| 669 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A_ ( unittest.TestCase ):
    """
    Slow integration tests for the BetterTransformer bridge (requires
    ``optimum``): conversion round-trip and the save_pretrained guard.

    NOTE(review): restored from a machine-mangled original in which both test
    methods were named ``_snake_case`` (the first was shadowed and neither
    was discovered by unittest) and several call arguments were the undefined
    name ``a``.  The ``ValueError`` expectation below mirrors upstream's
    ``test_error_save_pretrained``; confirm against the installed
    transformers version.
    """

    def test_transform_and_reverse(self):
        """to_bettertransformer -> generate -> reverse -> save/reload -> identical output."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)

        inp = tokenizer('This is me', return_tensors='pt')

        model = model.to_bettertransformer()
        # After conversion at least one submodule must be a BetterTransformer layer.
        self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeqaSeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules()))

            output_from_pretrained = model_reloaded.generate(**inp)
            # The reloaded (reversed) model must reproduce the original generation.
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must refuse a converted model until it is reversed."""
        model_id = 'hf-internal-testing/tiny-random-t5'
        model = AutoModelForSeqaSeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
| 669 |
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether we are running inside Google Colab: its notebook frontend cannot
# render the cursor-driven menu, so BulletMenu.run() falls back to plain
# numeric input.  NOTE(review): the mangled original bound this flag to
# `lowercase_`, but the menu class below reads `in_colab` -> NameError.
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class A_ :
    """
    A CLI menu: renders ``choices`` in the terminal and lets the user pick
    one with the arrow keys, the digit keys, or (in Colab) a typed index.

    NOTE(review): restored from a machine-mangled original in which
    ``__init__`` had duplicate ``a`` parameters (a SyntaxError), instance
    attributes were bound to a throwaway local, every handler was named
    ``_snake_case`` even though call sites use the names below, and the
    digit-key decorator referenced the undefined name ``a`` instead of its
    loop variable.
    """

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0  # index of the currently highlighted choice
        self.choices = choices
        self.prompt = prompt
        # Windows terminals often cannot render the arrow glyph.
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "

    def write_choice(self, index: int, end: str = ""):
        """Write choice ``index``; green (ANSI color 32) where colors work."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Print the choice at ``index``, prefixed with the arrow if selected."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the selection ``num_spaces`` rows in ``direction``, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return  # already at the bottom
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return  # already at the top
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP["up"])
    def move_up(self):
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP["down"])
    def move_down(self):
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP["newline"])
    def select(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        return self.position

    @input.mark(KEYMAP["interrupt"])
    def interrupt(self):
        move_cursor(len(self.choices) - self.position, "DOWN")
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        """Jump straight to the row whose digit key was pressed, if it exists."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Draw the menu, take over input until a choice is made, and return its index."""
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    # Colab cannot do raw key handling: read a typed index.
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # Erase the rendered menu before echoing the final choice.
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
| 669 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.