| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 82-53.2k | int64 0-721 | stringlengths 91-41.9k | int64 0-699 | int64 0-1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class snake_case ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
lowerCamelCase__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCamelCase__ = (
{
"feature-extraction": TFMobileBertModel,
"fill-mask": TFMobileBertForMaskedLM,
"question-answering": TFMobileBertForQuestionAnswering,
"text-classification": TFMobileBertForSequenceClassification,
"token-classification": TFMobileBertForTokenClassification,
"zero-shot": TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :str , _lowerCamelCase :Optional[int]=False ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class in get_values(lowerCamelCase_ ):
__SCREAMING_SNAKE_CASE : str = tf.zeros(self.model_tester.batch_size , dtype=tf.intaa )
return inputs_dict
class snake_case ( __UpperCAmelCase ):
def __init__( self :Dict , _lowerCamelCase :Any , _lowerCamelCase :str=1_3 , _lowerCamelCase :List[str]=7 , _lowerCamelCase :Optional[Any]=True , _lowerCamelCase :Union[str, Any]=True , _lowerCamelCase :Any=True , _lowerCamelCase :int=True , _lowerCamelCase :int=9_9 , _lowerCamelCase :List[Any]=3_2 , _lowerCamelCase :str=3_2 , _lowerCamelCase :List[str]=2 , _lowerCamelCase :int=4 , _lowerCamelCase :List[str]=3_7 , _lowerCamelCase :int="gelu" , _lowerCamelCase :str=0.1 , _lowerCamelCase :Any=0.1 , _lowerCamelCase :Optional[int]=5_1_2 , _lowerCamelCase :List[Any]=1_6 , _lowerCamelCase :List[str]=2 , _lowerCamelCase :int=0.0_2 , _lowerCamelCase :int=3 , _lowerCamelCase :Any=4 , _lowerCamelCase :Optional[int]=None , ):
__SCREAMING_SNAKE_CASE : Tuple = parent
__SCREAMING_SNAKE_CASE : int = batch_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = seq_length
__SCREAMING_SNAKE_CASE : Optional[Any] = is_training
__SCREAMING_SNAKE_CASE : Optional[Any] = use_input_mask
__SCREAMING_SNAKE_CASE : Dict = use_token_type_ids
__SCREAMING_SNAKE_CASE : Optional[int] = use_labels
__SCREAMING_SNAKE_CASE : str = vocab_size
__SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
__SCREAMING_SNAKE_CASE : Any = num_hidden_layers
__SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
__SCREAMING_SNAKE_CASE : Dict = intermediate_size
__SCREAMING_SNAKE_CASE : Tuple = hidden_act
__SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob
__SCREAMING_SNAKE_CASE : Dict = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE : Optional[Any] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Dict = type_vocab_size
__SCREAMING_SNAKE_CASE : Any = type_sequence_label_size
__SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = num_choices
__SCREAMING_SNAKE_CASE : Optional[int] = scope
__SCREAMING_SNAKE_CASE : Any = embedding_size
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__SCREAMING_SNAKE_CASE : Any = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE : str = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE : int = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : Union[str, Any] = None
__SCREAMING_SNAKE_CASE : Optional[int] = None
if self.use_labels:
__SCREAMING_SNAKE_CASE : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__SCREAMING_SNAKE_CASE : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
__SCREAMING_SNAKE_CASE : Optional[int] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :Any , _lowerCamelCase :List[Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Optional[Any] ):
__SCREAMING_SNAKE_CASE : Any = TFMobileBertModel(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Dict = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Any = model(lowerCamelCase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self :str , _lowerCamelCase :Any , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Any , _lowerCamelCase :Any , _lowerCamelCase :int , _lowerCamelCase :Optional[int] , _lowerCamelCase :Dict ):
__SCREAMING_SNAKE_CASE : str = TFMobileBertForMaskedLM(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self :List[Any] , _lowerCamelCase :List[Any] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[int] , _lowerCamelCase :str , _lowerCamelCase :int , _lowerCamelCase :Any , _lowerCamelCase :Optional[int] ):
__SCREAMING_SNAKE_CASE : Tuple = TFMobileBertForNextSentencePrediction(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : str = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self :Union[str, Any] , _lowerCamelCase :Dict , _lowerCamelCase :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :Optional[int] , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Any , _lowerCamelCase :List[str] ):
__SCREAMING_SNAKE_CASE : List[str] = TFMobileBertForPreTraining(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Tuple , _lowerCamelCase :Any , _lowerCamelCase :Optional[int] , _lowerCamelCase :int , _lowerCamelCase :Any , _lowerCamelCase :Any , _lowerCamelCase :Dict ):
__SCREAMING_SNAKE_CASE : List[Any] = self.num_labels
__SCREAMING_SNAKE_CASE : str = TFMobileBertForSequenceClassification(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : List[Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :str , _lowerCamelCase :Optional[int] , _lowerCamelCase :List[str] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Optional[Any] ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.num_choices
__SCREAMING_SNAKE_CASE : str = TFMobileBertForMultipleChoice(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Any = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : Optional[int] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : List[Any] = tf.tile(tf.expand_dims(lowerCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE : List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__SCREAMING_SNAKE_CASE : Union[str, Any] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self :Tuple , _lowerCamelCase :int , _lowerCamelCase :Union[str, Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Dict , _lowerCamelCase :Optional[int] , _lowerCamelCase :Optional[int] , _lowerCamelCase :int ):
__SCREAMING_SNAKE_CASE : Any = self.num_labels
__SCREAMING_SNAKE_CASE : Optional[int] = TFMobileBertForTokenClassification(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : Optional[int] = model(lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self :int , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Any , _lowerCamelCase :str , _lowerCamelCase :str , _lowerCamelCase :List[str] , _lowerCamelCase :List[str] , _lowerCamelCase :List[Any] ):
__SCREAMING_SNAKE_CASE : Tuple = TFMobileBertForQuestionAnswering(config=lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE : Tuple = model(lowerCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.prepare_config_and_inputs()
(
(
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) , (
__SCREAMING_SNAKE_CASE
) ,
) : Dict = config_and_inputs
__SCREAMING_SNAKE_CASE : List[str] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFMobileBertModelTest.TFMobileBertModelTester(self )
__SCREAMING_SNAKE_CASE : Tuple = ConfigTester(self , config_class=lowerCamelCase_ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE_ ( self :Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self :Tuple ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
__SCREAMING_SNAKE_CASE : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :int ):
__SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :List[str] ):
__SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self :Optional[Any] ):
__SCREAMING_SNAKE_CASE : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*lowerCamelCase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self :Any ):
for model_name in ["google/mobilebert-uncased"]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = TFMobileBertModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_tf
class snake_case ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self :str ):
__SCREAMING_SNAKE_CASE : str = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Dict = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE : int = model(lowerCamelCase_ )[0]
__SCREAMING_SNAKE_CASE : List[Any] = [1, 6, 3_0_5_2_2]
self.assertEqual(output.shape , lowerCamelCase_ )
__SCREAMING_SNAKE_CASE : Tuple = tf.constant(
[
[
[-4.5_9_1_9_5_4_7, -9.2_4_8_2_9_5, -9.6_4_5_2_5_6],
[-6.7_3_0_6_1_7_5, -6.4_4_0_2_8_4, -6.6_0_5_2_8_3_7],
[-7.2_7_4_3_5_0_6, -6.7_8_4_7_9_1_5, -6.0_2_4_6_7_3],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , lowerCamelCase_ , atol=1e-4 )
from __future__ import annotations

from collections.abc import Callable
from typing import Generic, TypeVar

T = TypeVar("T")
U = TypeVar("U")


class DoubleLinkedListNode(Generic[T, U]):
    """Double linked list node built specifically for LRUCache."""

    def __init__(self, key: T | None, val: U | None) -> None:
        self.key = key
        self.val = val
        self.next: DoubleLinkedListNode[T, U] | None = None
        self.prev: DoubleLinkedListNode[T, U] | None = None

    def __repr__(self) -> str:
        return (
            f"Node: key: {self.key}, val: {self.val}, "
            f"has next: {bool(self.next)}, has prev: {bool(self.prev)}"
        )


class DoubleLinkedList(Generic[T, U]):
    """Double linked list built specifically for LRUCache, with sentinel head and rear nodes."""

    def __init__(self) -> None:
        self.head: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.rear: DoubleLinkedListNode[T, U] = DoubleLinkedListNode(None, None)
        self.head.next, self.rear.prev = self.rear, self.head

    def __repr__(self) -> str:
        rep = ["DoubleLinkedList"]
        node = self.head
        while node.next is not None:
            rep.append(str(node))
            node = node.next
        rep.append(str(self.rear))
        return ",\n    ".join(rep)

    def add(self, node: DoubleLinkedListNode[T, U]) -> None:
        """Adds the given node to the end of the list (before rear)."""
        previous = self.rear.prev

        # All nodes other than self.head are guaranteed to have a non-None previous
        assert previous is not None

        previous.next = node
        node.prev = previous
        self.rear.prev = node
        node.next = self.rear

    def remove(self, node: DoubleLinkedListNode[T, U]) -> DoubleLinkedListNode[T, U] | None:
        """Removes and returns the given node; returns None if the node is not attached to the list."""
        if node.prev is None or node.next is None:
            return None

        node.prev.next = node.next
        node.next.prev = node.prev
        node.prev = None
        node.next = None
        return node


class LRUCache(Generic[T, U]):
    """LRU cache that stores up to `capacity` key/value pairs."""

    # class variable mapping decorated functions to their cache instance
    decorator_function_to_instance_map: dict[Callable[[T], U], LRUCache[T, U]] = {}

    def __init__(self, capacity: int) -> None:
        self.list: DoubleLinkedList[T, U] = DoubleLinkedList()
        self.capacity = capacity
        self.num_keys = 0
        self.hits = 0
        self.miss = 0
        self.cache: dict[T, DoubleLinkedListNode[T, U]] = {}

    def __repr__(self) -> str:
        return (
            f"CacheInfo(hits={self.hits}, misses={self.miss}, "
            f"capacity={self.capacity}, current size={self.num_keys})"
        )

    def __contains__(self, key: T) -> bool:
        return key in self.cache

    def get(self, key: T) -> U | None:
        """Returns the value for `key` and marks it most recently used; returns None on a miss."""
        if key in self.cache:
            self.hits += 1
            value_node: DoubleLinkedListNode[T, U] = self.cache[key]
            node = self.list.remove(self.cache[key])
            assert node == value_node

            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(node)
            return node.val
        self.miss += 1
        return None

    def put(self, key: T, value: U) -> None:
        """Sets the value for `key`, evicting the least recently used entry when at capacity."""
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                first_node = self.list.head.next

                # guaranteed to have a non-None first node when num_keys > 0;
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert self.list.remove(first_node) is not None  # node guaranteed to be in list

                del self.cache[first_node.key]
                self.num_keys -= 1
            self.cache[key] = DoubleLinkedListNode(key, value)
            self.list.add(self.cache[key])
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            node = self.list.remove(self.cache[key])
            assert node is not None  # node guaranteed to be in list
            node.val = value
            self.list.add(node)

    @classmethod
    def decorator(cls, size: int = 128) -> Callable[[Callable[[T], U]], Callable[..., U]]:
        """Decorator version of the LRU cache."""

        def cache_decorator_inner(func: Callable[[T], U]) -> Callable[..., U]:
            def cache_decorator_wrapper(*args: T) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    cls.decorator_function_to_instance_map[func] = LRUCache(size)

                result = cls.decorator_function_to_instance_map[func].get(args[0])
                if result is None:
                    result = func(*args)
                    cls.decorator_function_to_instance_map[func].put(args[0], result)
                return result

            def cache_info() -> LRUCache[T, U]:
                return cls.decorator_function_to_instance_map[func]

            setattr(cache_decorator_wrapper, "cache_info", cache_info)  # noqa: B010
            return cache_decorator_wrapper

        return cache_decorator_inner


if __name__ == "__main__":
    import doctest

    doctest.testmod()
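
# A minimal usage sketch of the `decorator` classmethod above; `fib` is an
# illustrative example, not part of the original module. Repeated subcalls
# hit the cache, so the naive recursion runs in roughly linear time.
@LRUCache.decorator(100)
def fib(num: int) -> int:
    return num if num < 2 else fib(num - 1) + fib(num - 2)

print(fib(50))           # 12586269025
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=...)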
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[int | float], int | float],
    x_start: int | float,
    x_end: int | float,
    steps: int = 100,
) -> float:
    """Approximates the length of the curve y = fnc(x) between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x):
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
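
# Quick sanity check of line_length (illustrative, not part of the original
# module): the graph of f(x) = x from 0 to 1 is a straight segment of length
# sqrt(2), and the polyline approximation is exact for a linear function.
assert abs(line_length(lambda x: x, 0, 1, 10) - math.sqrt(2)) < 1e-9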
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
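
# Toy illustration of what rename_fairseq_keys does (the keys below are made
# up for demonstration and do not come from a real checkpoint):
#
#   toy_state = {
#       "layers.0.moe_layer.experts.0.fc1.weight": torch.zeros(1),
#       "layers.0.moe_layer.gate.wg.weight": torch.zeros(1),
#   }
#   list(rename_fairseq_keys(toy_state, expert_idx=3))
#   # -> ['layers.0.ffn.experts.expert_3.fc1.weight',
#   #     'layers.0.ffn.router.classifier.weight']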
"""simple docstring"""
a :Union[str, Any] = {str(digit): digit**5 for digit in range(10)}
def _lowercase ( __lowerCAmelCase ) -> Optional[Any]:
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(_snake_case ) )
def _lowercase ( ) -> List[str]:
return sum(
number
for number in range(1000 , 100_0000 )
if number == digits_fifth_powers_sum(_snake_case ) )
if __name__ == "__main__":
print(solution())
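
# Quick check of one known member of the sequence (illustrative):
# 4150 = 4**5 + 1**5 + 5**5 + 0**5 = 1024 + 1 + 3125 + 0
assert digits_fifth_powers_sum(4150) == 4150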
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCamelCase ( ):
UpperCAmelCase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' ,type=_snake_case ,default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' ,type=_snake_case ,default=5 )
parser.add_argument('--batch_size' ,type=_snake_case ,default=6 )
parser.add_argument('--gradient_accumulation_steps' ,type=_snake_case ,default=1 )
parser.add_argument('--freeze' ,type=_snake_case ,default=_snake_case )
parser.add_argument('--learning_rate' ,type=_snake_case ,default=5e-4 )
parser.add_argument('--seed' ,type=_snake_case ,default=0 )
parser.add_argument('--lr_scheduler_type' ,type=_snake_case ,default='cosine' )
parser.add_argument('--num_warmup_steps' ,type=_snake_case ,default=10 )
parser.add_argument('--weight_decay' ,type=_snake_case ,default=0.01 )
parser.add_argument('--output_dir' ,type=_snake_case ,default='./results' )
return parser.parse_args()
UpperCamelCase__ = load('accuracy')
def lowerCamelCase ( _snake_case ):
UpperCAmelCase__ , UpperCAmelCase__ : int = eval_pred
UpperCAmelCase__ : Optional[int] = np.argmax(_snake_case ,axis=1 )
return metric.compute(predictions=_snake_case ,references=_snake_case )
class a ( lowercase ):
def __init__( self , UpperCamelCase_ ):
super().__init__()
UpperCAmelCase__ : List[str] = trainer
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ):
if control.should_evaluate:
UpperCAmelCase__ : int = deepcopy(UpperCamelCase_ )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowerCamelCase ( ):
UpperCAmelCase__ : int = get_args()
set_seed(args.seed )
UpperCAmelCase__ : Optional[int] = load_dataset('codeparrot/codecomplex' ,split='train' )
UpperCAmelCase__ : Tuple = dataset.train_test_split(test_size=0.2 )
UpperCAmelCase__ : List[Any] = train_test['test'].train_test_split(test_size=0.5 )
UpperCAmelCase__ : Tuple = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
UpperCAmelCase__ : str = AutoTokenizer.from_pretrained(args.model_ckpt )
UpperCAmelCase__ : Optional[Any] = tokenizer.eos_token
UpperCAmelCase__ : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt ,num_labels=7 )
UpperCAmelCase__ : Dict = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[Any] = ClassLabel(num_classes=7 ,names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(_snake_case ):
UpperCAmelCase__ : Dict = tokenizer(example['src'] ,truncation=_snake_case ,max_length=1024 )
UpperCAmelCase__ : Any = labels.straint(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
UpperCAmelCase__ : List[Any] = train_test_validation.map(
_snake_case ,batched=_snake_case ,remove_columns=train_test_validation['train'].column_names ,)
UpperCAmelCase__ : Tuple = DataCollatorWithPadding(tokenizer=_snake_case )
UpperCAmelCase__ : Union[str, Any] = TrainingArguments(
output_dir=args.output_dir ,learning_rate=args.learning_rate ,lr_scheduler_type=args.lr_scheduler_type ,evaluation_strategy='epoch' ,save_strategy='epoch' ,logging_strategy='epoch' ,per_device_train_batch_size=args.batch_size ,per_device_eval_batch_size=args.batch_size ,num_train_epochs=args.num_epochs ,gradient_accumulation_steps=args.gradient_accumulation_steps ,weight_decay=0.01 ,metric_for_best_model='accuracy' ,run_name='complexity-java' ,report_to='wandb' ,)
UpperCAmelCase__ : Dict = Trainer(
model=_snake_case ,args=_snake_case ,train_dataset=tokenized_datasets['train'] ,eval_dataset=tokenized_datasets['valid'] ,tokenizer=_snake_case ,data_collator=_snake_case ,compute_metrics=_snake_case ,)
print('Training...' )
trainer.add_callback(CustomCallback(_snake_case ) )
trainer.train()
if __name__ == "__main__":
main()
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
_CITATION = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
def split(string: str, separator: str = " ") -> list:
    """
    Splits the string up into all the values separated by the separator
    (defaults to spaces).

    >>> split("apple#banana#cherry#orange", separator="#")
    ['apple', 'banana', 'cherry', 'orange']
    >>> split("Hello there")
    ['Hello', 'there']
    >>> split("11/22/63", separator="/")
    ['11', '22', '63']
    """
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
"""Monte Carlo estimation of pi and of areas under curves."""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean


def pi_estimator(iterations: int) -> None:
    """
    Estimates pi by placing random dots in a 2x2 square centred at the origin
    and counting the fraction that lands in the inscribed unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The value of pi from math is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of `function_to_integrate` over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Checks area_under_curve_estimator on f(x) = x, whose exact integral is known."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """Estimates pi as the area under f(x) = sqrt(4 - x^2) from 0 to 2 (a quarter circle of radius 2)."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""Fine-tunes a sequence-to-sequence model with Seq2SeqTrainer."""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional

from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments

import transformers
from transformers import (
    AutoConfig,
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    HfArgumentParser,
    MBartTokenizer,
    MBartTokenizerFast,
    set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
    assert_all_frozen,
    build_compute_metrics_fn,
    check_output_dir,
    freeze_embeds,
    freeze_params,
    lmap,
    save_json,
    use_task_specific_params,
    write_txt_file,
)


logger = logging.getLogger(__name__)


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )


def handle_metrics(split, metrics, output_dir):
    """Logs the metrics for the given split and saves them as {split}_results.json."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )

    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset

    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer,
            type_path="train",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer,
            type_path="val",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer,
            type_path="test",
            data_dir=data_args.data_dir,
            n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length,
            max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(
            tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores
        ),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )

    all_metrics = {}
    # Training
    if training_args.do_train:
        logger.info("*** Train ***")

        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
"""simple docstring"""
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _UpperCAmelCase ( unittest.TestCase):
@slow
def _snake_case ( self : List[Any] ):
snake_case_ : List[Any] = AutoImageProcessor.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
snake_case_ : List[Any] = AutoModelForImageClassification.from_pretrained('''microsoft/dit-base-finetuned-rvlcdip''' )
model.to(lowercase_ )
from datasets import load_dataset
snake_case_ : str = load_dataset('''nielsr/rvlcdip-demo''' )
snake_case_ : Tuple = dataset['''train'''][0]['''image'''].convert('''RGB''' )
snake_case_ : Union[str, Any] = image_processor(lowercase_ , return_tensors='''pt''' ).to(lowercase_ )
# forward pass
with torch.no_grad():
snake_case_ : List[Any] = model(**lowercase_ )
snake_case_ : int = outputs.logits
snake_case_ : Any = torch.Size((1, 16) )
self.assertEqual(logits.shape , lowercase_ )
snake_case_ : Any = torch.tensor(
[-0.41_58, -0.40_92, -0.43_47] , device=lowercase_ , dtype=torch.float , )
self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
"""simple docstring"""
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class _UpperCAmelCase ( lowerCAmelCase__):
_lowerCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
_lowerCAmelCase : Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params

        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)

        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)

        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())

        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)

        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
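    # Minimal sketch (not part of the original tests) of the replicate/shard
    # pattern both tests rely on when jit=True pmaps the pipeline: params get a
    # leading device axis via replicate(), per-sample inputs are split across
    # devices via shard(). Works on CPU with a single device too.
    def _sharding_sketch(self):
        num_devices = jax.device_count()
        batch = jnp.arange(num_devices * 4).reshape(num_devices * 4, 1)
        assert shard(batch).shape == (num_devices, 4, 1)  # batch split per device
        params = {"w": jnp.ones((2, 2))}
        assert replicate(params)["w"].shape == (num_devices, 2, 2)  # weights copied per device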
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if value of any node in min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the minimum spanning tree edges of the given adjacency list."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
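
# Hypothetical usage sketch (not in the original file): vertex ids must be
# 0..n-1 and each adjacency entry is a [neighbor, weight] pair. For this
# weighted triangle the MST is the two cheapest edges, returned as
# (parent, vertex) tuples.
def _prims_example() -> None:
    example_graph = {
        0: [[1, 1], [2, 3]],
        1: [[0, 1], [2, 2]],
        2: [[0, 3], [1, 2]],
    }
    assert prisms_algorithm(example_graph) == [(0, 1), (1, 2)]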
from math import isqrt
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Returns prime numbers below max_number (simple sieve of Eratosthenes)."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(max_number: int = 10**8) -> int:
    """
    Returns the number of composite integers below max_number that have exactly
    two (not necessarily distinct) prime factors, counted with a two-pointer
    sweep over the primes below max_number // 2.
    """
    prime_numbers = calculate_prime_numbers(max_number // 2)

    semiprimes_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count


if __name__ == "__main__":
    print(f"{solution() = }")
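
# Tiny sanity sketch (not in the original file): the semiprimes below 30 are
# 4, 6, 9, 10, 14, 15, 21, 22, 25, 26 -- ten numbers in total.
def _check_small_case() -> None:
    assert solution(30) == 10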
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.17.0.dev0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/text-classification/requirements.txt')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}

        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
            else:
                raise ValueError("Need either a GLUE task or a test file for `do_predict`.")

        for key in data_files.keys():
            logger.info(f"load a local file for {key}: {data_files[key]}")

        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)

        result["label"] = examples["label"]
        return result

    with training_args.main_process_first(desc="dataset map pre-processing"):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function,
            batched=True,
            load_from_cache_file=not data_args.overwrite_cache,
            desc="Running tokenizer on dataset",
        )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))

    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))

    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))

    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")
        # Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)

        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")

    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
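
# Hypothetical invocation sketch (flag names come from the dataclasses above;
# the checkpoint name is an assumption, not pinned by this script):
#
#   python run_tabfact_with_tapex.py \
#       --model_name_or_path microsoft/tapex-base \
#       --dataset_name tab_fact \
#       --do_train --do_eval \
#       --per_device_train_batch_size 8 \
#       --max_seq_length 1024 \
#       --output_dir ./tapex-tabfact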
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
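
# Hedged illustration (not the transformers helper itself): the _LazyModule
# above defers heavy torch/TF imports until an attribute is first accessed.
# A minimal PEP 562-style resolver with the same idea:
import importlib


def lazy_resolve(name: str, mapping: dict, package: str):
    """Import `name` from the submodule recorded in `mapping` on first use."""
    if name not in mapping:
        raise AttributeError(f"module {package!r} has no attribute {name!r}")
    module = importlib.import_module(mapping[name], package)
    return getattr(module, name)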
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    """Visual Question Answering pipeline: answers a question about an image."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accepts dicts, lists of dicts, generators and datasets directly.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
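
# Hypothetical usage sketch via the high-level `pipeline` factory; the model
# checkpoint name is an assumption, not something this file pins down.
def _vqa_usage_sketch():
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
    preds = vqa(image=image_url, question="What is she wearing?", top_k=2)
    # -> [{"score": float, "answer": str}, ...] as produced by postprocess() above
    return preds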
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
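
# Hedged sketch (not part of this file) of the predictor-corrector loop a
# pipeline typically runs on top of this scheduler: `correct_steps` Langevin
# corrections via step_correct, then one reverse-SDE prediction via step_pred
# per timestep. `model` is assumed to map (sample, sigma) -> a score estimate.
def _sampling_loop_sketch(model, scheduler, sample, num_inference_steps=2000, generator=None):
    scheduler.set_timesteps(num_inference_steps)
    scheduler.set_sigmas(num_inference_steps)
    for i, t in enumerate(scheduler.timesteps):
        sigma_t = scheduler.sigmas[i] * torch.ones(sample.shape[0], device=sample.device)
        for _ in range(scheduler.config.correct_steps):
            model_output = model(sample, sigma_t)
            sample = scheduler.step_correct(model_output, sample, generator=generator).prev_sample
        model_output = model(sample, sigma_t)
        output = scheduler.step_pred(model_output, t, sample, generator=generator)
        sample, sample_mean = output.prev_sample, output.prev_sample_mean
    return sample_mean  # the denoised mean is usually what gets returned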
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
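
# Quick standalone sketch (hypothetical, not one of the original tests)
# mirroring the integration test's shape expectations: Whisper log-mel
# features are always 80 mel bins x 3000 frames (30 s at 16 kHz, padded
# or truncated).
def _whisper_feature_shape_sketch():
    feature_extractor = WhisperFeatureExtractor()
    audio = np.zeros(16_000, dtype=np.float32)  # 1 second of silence at 16 kHz
    input_features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np").input_features
    assert input_features.shape == (1, 80, 3000)
    return input_features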
from __future__ import annotations

import csv

import requests
from bs4 import BeautifulSoup


def get_imdb_top_250_movies(url: str = "") -> dict[str, float]:
    url = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    titles = soup.find_all("td", attrs="titleColumn")
    ratings = soup.find_all("td", class_="ratingColumn imdbRating")
    return {
        title.a.text: float(rating.strong.text)
        for title, rating in zip(titles, ratings)
    }


def write_movies(filename: str = "IMDb_Top_250_Movies.csv") -> None:
    movies = get_imdb_top_250_movies()
    with open(filename, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["Movie title", "IMDb rating"])
        for title, rating in movies.items():
            writer.writerow([title, rating])


if __name__ == "__main__":
    write_movies()
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
) -> tuple[float, np.ndarray]:
    """Find the largest eigenvalue and corresponding eigenvector of input_matrix."""
    # Ensure matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
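
# Minimal sketch (assumed values, not in the original): for the symmetric
# matrix [[2, 1], [1, 2]] the eigenvalues are 1 and 3, so power iteration
# should converge to ~3.0 from almost any starting vector.
def _power_iteration_example() -> None:
    eigen_value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
    assert abs(eigen_value - 3.0) <= 1e-6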
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0


def test_circular_linked_list() -> None:
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
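
# Minimal usage sketch (hypothetical): the list wraps around, so iteration
# stops when it returns to the head; deletions keep the ring intact.
def _circular_list_example() -> None:
    cll = CircularLinkedList()
    for value in (1, 2, 3):
        cll.insert_tail(value)
    assert repr(cll) == "1->2->3"
    assert cll.delete_front() == 1
    assert len(cll) == 2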
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = torch.permute(_UpperCAmelCase , (0, 2, 1))
elif flax_key_tuple[-1] == "kernel" and ".".join(_UpperCAmelCase):
# linear layer
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
SCREAMING_SNAKE_CASE = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
SCREAMING_SNAKE_CASE = flax_key_tuple[:-1] + ('weight',)
return flax_key_tuple, flax_tensor
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase):
if "metadata" in layer:
SCREAMING_SNAKE_CASE = layer.split('metadata')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('metadata' + split_layer[1]).split('/'))]
elif "kvstore" in layer:
SCREAMING_SNAKE_CASE = layer.split('kvstore')
SCREAMING_SNAKE_CASE = ''.join(split_layer[0])[:-1]
SCREAMING_SNAKE_CASE = [tuple(('kvstore' + split_layer[1]).split('/'))]
else:
SCREAMING_SNAKE_CASE = layer.split('/')
SCREAMING_SNAKE_CASE = '/'.join(split_layer[:-1])
SCREAMING_SNAKE_CASE = (split_layer[-1],)
if "kvstore/path" in layer:
SCREAMING_SNAKE_CASE = F'''{switch_checkpoint_path}/{checkpoint_info[layer]}'''
elif "kvstore/driver" in layer:
SCREAMING_SNAKE_CASE = 'file'
else:
SCREAMING_SNAKE_CASE = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowerCamelCase__ (_UpperCAmelCase , _UpperCAmelCase):
SCREAMING_SNAKE_CASE = rename_keys(_UpperCAmelCase)
SCREAMING_SNAKE_CASE = {}
for k, v in current_block.items():
SCREAMING_SNAKE_CASE = v
SCREAMING_SNAKE_CASE = new_current_block
torch.save(_UpperCAmelCase , _UpperCAmelCase)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    # Index file name follows the standard HF convention ("<weights_name>.index.json");
    # the exact constant used by the original script is not visible here.
    with open(os.path.join(dump_path, weights_name + ".index.json"), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
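# Added illustrative sketch (not part of the original script; the parameter names
# and byte count below are invented): the shape of the index returned by
# `shard_on_the_fly` when more than one shard is written.
def _example_index():
    metadata = {"total_size": 26_953_662_464}
    weight_map = {
        "encoder.block.0.layer.0.SelfAttention.q.weight": "pytorch_model-00001-of-00002.bin",
        "decoder.final_layer_norm.weight": "pytorch_model-00002-of-00002.bin",
    }
    return {"metadata": metadata, "weight_map": weight_map}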
if __name__ == "__main__":
a_ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600',
type=str,
required=False,
help='Path to a directory containing a folder per layer. Follows the original Google format.',
)
parser.add_argument('--max_shard_size', default='10GB', required=False, help='Max shard size')
parser.add_argument('--dtype', default='bfloat16', type=str, required=False, help='dtype of the saved model')
parser.add_argument(
'--pytorch_dump_folder_path',
default='/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted',
type=str,
required=False,
help='Path to the output pytorch model.',
)
a_ : Any = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    """
    Reads given file as bytes and returns them as a long string of bits.
    """
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()
def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    """
    Adds new strings (curr_string + "0", curr_string + "1") to the lexicon.
    """
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]
def compress_data(data_bits: str) -> str:
    """
    Compresses given data_bits using the Lempel-Ziv-Welch algorithm and returns the result as a string.
    """
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result
def add_file_length(source_path: str, compressed: str) -> str:
    """
    Adds the original file's length (in an Elias-gamma-style header) in front of the compressed string.
    """
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed
def write_file_binary(file_path: str, to_write: str) -> None:
    """
    Writes given to_write string (should only consist of 0's and 1's) as bytes in the file.
    """
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length] for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (byte_length - len(result_byte_array[-1]) - 1)

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()
def compress(source_path: str, destination_path: str) -> None:
    """
    Reads the source file, compresses it and writes the result to the destination file.
    """
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)
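# Added examples (not part of the original module).
# Worked example for `add_file_length`: a 5-byte source file has file_length == 5,
# i.e. "101" in binary (3 bits), so "00" + "101" == "00101" is prepended.
def _demo_compress_data():
    """Tiny in-memory usage sketch that avoids touching the filesystem."""
    sample_bits = "0010100011110000"
    compressed = compress_data(sample_bits)
    print(f"{len(sample_bits)} bits -> {len(compressed)} bits: {compressed}")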
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(self, vocab_size=32_128, d_model=768, d_kv=64, d_ff=2_048, expert_capacity=64, num_layers=12, num_sparse_encoder_layers=3, num_decoder_layers=12, num_sparse_decoder_layers=3, num_heads=12, num_experts=8, router_bias=False, router_jitter_noise=0.01, router_dtype="float32", router_ignore_padding_tokens=False, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, router_z_loss_coef=0.001, router_aux_loss_coef=0.001, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, add_router_probs=False, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
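# Added usage sketch (not part of the original module): with 12 layers and 3
# sparse layers, every 4th encoder layer is a mixture-of-experts layer.
def _example_sparse_step():
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    return config.encoder_sparse_step  # -> 4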
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_nllb import NllbTokenizer
else:
    NllbTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model'
),
},
'tokenizer_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/nllb-large-en-ro": 1024,
    "facebook/nllb-200-distilled-600M": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class NllbTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = NllbTokenizer

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", src_lang=None, tgt_lang=None, additional_special_tokens=None, legacy_behaviour=False, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.legacy_behaviour = legacy_behaviour
        super().__init__(
            vocab_file=vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, legacy_behaviour=legacy_behaviour, **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

        _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            _additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in _additional_special_tokens]
            )
        self.add_special_tokens({"additional_special_tokens": _additional_special_tokens})
        self.lang_code_to_id = {
            lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
        }

        self._src_lang = src_lang if src_lang is not None else "eng_Latn"
        self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by the translation pipeline, to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "eng_Latn", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "fra_Latn", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target lang setting."""
        self.cur_lang_code = self.convert_tokens_to_ids(lang)
        if self.legacy_behaviour:
            self.prefix_tokens = []
            self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
        else:
            self.prefix_tokens = [self.cur_lang_code]
            self.suffix_tokens = [self.eos_token_id]

        prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
        suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)

        self._tokenizer.post_processor = processors.TemplateProcessing(
            single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
            pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
            special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
        )

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
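# Added usage sketch (not part of the original module; downloads a checkpoint
# from the Hub when run):
# tokenizer = NllbTokenizerFast.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
# inputs = tokenizer("Hello world", return_tensors="pt")
# In the default (non-legacy) mode the eng_Latn code is prepended and </s> appended.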
'''simple docstring'''
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]

    # return path if start is goal
    if start == goal:
        return [start]

    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path

            # mark node as explored
            explored.add(node)

    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance between `start` and `target` nodes."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
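# Added worked example (derived from demo_graph above): BFS from "G" yields
# distances {"G": 0, "C": 1, "A": 2, "F": 2, "B": 3, "E": 3, "D": 4}, so the
# shortest G -> D distance is 4, matching the demo output below.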
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, """G""", """D""")) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, """G""", """D""")) # returns 4
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
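# Added usage sketch (not part of the original module; downloads from the Hub):
# tok = BertTokenizerFast.from_pretrained("bert-base-uncased")
# tok("hello world")["input_ids"]  # -> [101, 7592, 2088, 102], i.e. [CLS] hello world [SEP]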
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16000):
    """Randomly sample chunks of `max_length` seconds from the input audio"""
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]
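# Added worked example (illustrative numbers): a 10 s clip at 16 kHz has
# 160_000 samples; with max_length=5.0 a random contiguous window of
# 80_000 samples (5 s) is returned.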
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(default=None, metadata={"help": "Name of a dataset from the datasets package"})
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the training audio paths and labels."}
    )
    eval_file: Optional[str] = field(
        default=None, metadata={"help": "A file containing the validation audio paths and labels."}
    )
    train_split_name: str = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    eval_split_name: str = field(
        default="validation",
        metadata={
            "help": (
                "The name of the training data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    audio_column_name: str = field(
        default="audio",
        metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"},
    )
    label_column_name: str = field(
        default="label", metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_length_seconds: float = field(
        default=20,
        metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."},
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default="facebook/wav2vec2-base",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"}
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}
    )
    feature_extractor_name: Optional[str] = field(
        default=None, metadata={"help": "Name or path of preprocessor config."}
    )
    freeze_feature_encoder: bool = field(
        default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."}
    )
    attention_mask: bool = field(
        default=True, metadata={"help": "Whether to generate an attention mask in the feature extractor."}
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=None, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )

    def __post_init__(self):
        if not self.freeze_feature_extractor and self.freeze_feature_encoder:
            warnings.warn(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "will be removed in a future version. Use `--freeze_feature_encoder` "
                "instead. Setting `freeze_feature_encoder==True`.",
                FutureWarning,
            )
        if self.freeze_feature_extractor and not self.freeze_feature_encoder:
            raise ValueError(
                "The argument `--freeze_feature_extractor` is deprecated and "
                "should not be used in combination with `--freeze_feature_encoder`. "
                "Only make use of `--freeze_feature_encoder`."
            )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_audio_classification", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to train from scratch."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Initialize our dataset and prepare it for the audio classification task.
    raw_datasets = DatasetDict()
    raw_datasets["train"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )
    raw_datasets["eval"] = load_dataset(
        data_args.dataset_name, data_args.dataset_config_name, split=data_args.eval_split_name, use_auth_token=True if model_args.use_auth_token else None,
    )

    if data_args.audio_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--audio_column_name` to the correct audio column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    if data_args.label_column_name not in raw_datasets["train"].column_names:
        raise ValueError(
            f"--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. "
            "Make sure to set `--label_column_name` to the correct text column - one of "
            f"{', '.join(raw_datasets['train'].column_names)}."
        )

    # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
    # transformer outputs in the classifier, but it doesn't always lead to better accuracy
    feature_extractor = AutoFeatureExtractor.from_pretrained(
        model_args.feature_extractor_name or model_args.model_name_or_path, return_attention_mask=model_args.attention_mask, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )

    # `datasets` takes care of automatically loading and resampling the audio,
    # so we just need to set the correct target sampling rate.
    raw_datasets = raw_datasets.cast_column(
        data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate)
    )

    model_input_name = feature_extractor.model_input_names[0]

    def train_transforms(batch):
        """Apply train_transforms across a batch."""
        subsampled_wavs = []
        for audio in batch[data_args.audio_column_name]:
            wav = random_subsample(
                audio["array"], max_length=data_args.max_length_seconds, sample_rate=feature_extractor.sampling_rate
            )
            subsampled_wavs.append(wav)
        inputs = feature_extractor(subsampled_wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    def val_transforms(batch):
        """Apply val_transforms across a batch."""
        wavs = [audio["array"] for audio in batch[data_args.audio_column_name]]
        inputs = feature_extractor(wavs, sampling_rate=feature_extractor.sampling_rate)
        output_batch = {model_input_name: inputs.get(model_input_name)}
        output_batch["labels"] = list(batch[data_args.label_column_name])
        return output_batch

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    labels = raw_datasets["train"].features[data_args.label_column_name].names
    label2id, id2label = {}, {}
    for i, label in enumerate(labels):
        label2id[label] = str(i)
        id2label[str(i)] = label

    # Load the accuracy metric from the datasets package
    metric = evaluate.load("accuracy")

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary string to float.
    def compute_metrics(eval_pred):
        """Computes accuracy on a batch of predictions"""
        predictions = np.argmax(eval_pred.predictions, axis=1)
        return metric.compute(predictions=predictions, references=eval_pred.label_ids)

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path, num_labels=len(labels), label2id=label2id, id2label=id2label, finetuning_task="audio-classification", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForAudioClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # freeze the convolutional waveform encoder
    if model_args.freeze_feature_encoder:
        model.freeze_feature_encoder()

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            raw_datasets["train"] = (
                raw_datasets["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        raw_datasets["train"].set_transform(train_transforms, output_all_columns=False)

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            raw_datasets["eval"] = (
                raw_datasets["eval"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        raw_datasets["eval"].set_transform(val_transforms, output_all_columns=False)

    # Initialize our trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=raw_datasets["train"] if training_args.do_train else None, eval_dataset=raw_datasets["eval"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=feature_extractor,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "tasks": "audio-classification",
        "dataset": data_args.dataset_name,
        "tags": ["audio-classification"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
if __name__ == "__main__":
main()
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
                ("pre_logits.fc.weight", "pooler.dense.weight"),
                ("pre_logits.fc.bias", "pooler.dense.bias"),
            ]
        )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys
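# Added illustration: for i == 0 the loop above emits pairs such as
# ("blocks.0.norm1.weight", "vit.encoder.layer.0.layernorm_before.weight"),
# which `rename_key` below uses to move entries within the state dict.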
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak a timm ViT checkpoint into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10])
        config.image_size = int(vit_name[-9:-6])
    else:
        config.num_labels = 1_000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4])
        config.image_size = int(vit_name[-3:])
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("tiny"):
            config.hidden_size = 192
            config.intermediate_size = 768
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith("small"):
            config.hidden_size = 384
            config.intermediate_size = 1_536
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("small"):
            config.hidden_size = 768
            config.intermediate_size = 2_304
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith("base"):
            pass
        elif vit_name[4:].startswith("large"):
            config.hidden_size = 1_024
            config.intermediate_size = 4_096
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith("huge"):
            config.hidden_size = 1_280
            config.intermediate_size = 5_120
            config.num_hidden_layers = 32
            config.num_attention_heads = 16

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size)
    else:
        image_processor = ViTImageProcessor(size=config.image_size)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class to store the configuration of a MobileNetV1 model."""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
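# Minimal usage sketch (an illustration, not part of the original module; it assumes
# the standard `OnnxConfig(config, task=...)` constructor): build a width-0.75
# config and inspect the ONNX input/output spec.
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#   onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
#   print(onnx_config.inputs)               # OrderedDict([('pixel_values', {0: 'batch'})])
#   print(onnx_config.outputs)              # OrderedDict([('logits', {0: 'batch'})])
#   print(onnx_config.atol_for_validation)  # 0.0001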
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]
        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()
        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")
        prompt = "Spiderman is surfing"
        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
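# How to run the fast tests above (a sketch; the test file path inside the
# diffusers repository is an assumption):
#
#   pytest tests/pipelines/text_to_video_synthesis/test_video_to_video.py -k FastTests
#
# The @slow test only executes when RUN_SLOW=1 is set and a CUDA GPU is available.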
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

# Input label/image directories and the output directory (fill these in before running).
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"/{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"/{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print("DONE ✅")
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""
def verify_checksums(expected_checksums, recorded_checksums, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)
class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits, recorded_splits):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}
def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Whether the dataset is small enough to be loaded in memory."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
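# Usage sketch (illustrative, not part of the original module): verify that the
# recorded checksums of two downloaded files match the expected ones; a mismatch
# raises NonMatchingChecksumError, an extra/missing URL raises the other errors.
#
#   expected = {"https://example.com/a": {"checksum": "abc", "num_bytes": 10},
#               "https://example.com/b": {"checksum": "def", "num_bytes": 20}}
#   recorded = dict(expected)  # same files, same checksums -> passes silently
#   verify_checksums(expected, recorded, verification_name="dataset files")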
import argparse
import hashlib
import io
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"""tiny.en""": """https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt""",
"""tiny""": """https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt""",
"""base.en""": """https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt""",
"""base""": """https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt""",
"""small.en""": """https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt""",
"""small""": """https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt""",
"""medium.en""": """https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt""",
"""medium""": """https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt""",
"""large""": """https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt""",
"""large-v2""": """https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt""",
}
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"""blocks""": """layers""",
"""mlp.0""": """fc1""",
"""mlp.2""": """fc2""",
"""mlp_ln""": """final_layer_norm""",
""".attn.query""": """.self_attn.q_proj""",
""".attn.key""": """.self_attn.k_proj""",
""".attn.value""": """.self_attn.v_proj""",
""".attn_ln""": """.self_attn_layer_norm""",
""".attn.out""": """.self_attn.out_proj""",
""".cross_attn.query""": """.encoder_attn.q_proj""",
""".cross_attn.key""": """.encoder_attn.k_proj""",
""".cross_attn.value""": """.encoder_attn.v_proj""",
""".cross_attn_ln""": """.encoder_attn_layer_norm""",
""".cross_attn.out""": """.encoder_attn.out_proj""",
"""decoder.ln.""": """decoder.layer_norm.""",
"""encoder.ln.""": """encoder.layer_norm.""",
"""token_embedding""": """embed_tokens""",
"""encoder.positional_embedding""": """encoder.embed_positions.weight""",
"""decoder.positional_embedding""": """decoder.embed_positions.weight""",
"""ln_post""": """layer_norm""",
}
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)
        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str) -> bytes:
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)
    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")
    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))
    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )
    return model_bytes
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        # A model name such as "tiny" was passed: fetch the raw bytes first.
        # Wrapping them in BytesIO so torch.load can read them is an editorial fix,
        # as is downloading into the current directory.
        model_bytes = _download(_MODELS[checkpoint_path], root=".")
        original_checkpoint = torch.load(io.BytesIO(model_bytes), map_location="cpu")
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]
    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],  # head count, not the hidden size
        max_source_positions=dimensions["n_audio_ctx"],
    )
    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )
    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Patht to the downloaded checkpoints""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowercase : Optional[int] = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
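# Example invocation (a sketch; the script file name is an assumption). Passing a
# model name such as "tiny" downloads the original OpenAI weights first, while a
# local path to a .pt checkpoint skips the download:
#
#   python convert_openai_to_hf.py --checkpoint_path tiny --pytorch_dump_folder_path ./whisper-tiny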
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )
        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")
        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()
        pipe_1.tokenizer = None
        pipe_1.text_encoder = None
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)
        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, generator=generator, num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, original_image=original_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, num_inference_steps=2, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (64, 64, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()
        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds,
            image=image, mask_image=mask_image, original_image=original_image, generator=generator,
            num_inference_steps=2, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (256, 256, 3)
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ClapProcessor(ProcessorMixin):
    """Constructs a CLAP processor which wraps a feature extractor and a Roberta tokenizer."""

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        sampling_rate = kwargs.pop("sampling_rate", None)
        if text is None and audios is None:
            raise ValueError("You have to specify either text or audios. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if audios is not None:
            audio_features = self.feature_extractor(
                audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs
            )
        if text is not None and audios is not None:
            encoding["input_features"] = audio_features.input_features
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names))
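# Usage sketch (illustrative; the model name and the audio shape/sampling rate are
# assumptions, though 48 kHz is the rate CLAP feature extractors typically expect):
#
#   import numpy as np
#   from transformers import ClapProcessor
#   processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
#   audio = np.random.randn(48_000)  # ~1s of mono audio
#   batch = processor(text=["a dog barking"], audios=audio, sampling_rate=48_000, return_tensors="pt")
#   print(batch.keys())  # input_ids, attention_mask, input_features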
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear",
            num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_cycle(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()


@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1,
            generator=generator, output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)
    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2
    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)
    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]
    model = LukeModel(config=config).eval()
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )
    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")
    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")
    outputs = model(**encoding)
    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
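# Example invocation (a sketch; the script and checkpoint file names are
# assumptions based on the arguments defined above):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path luke_base/pytorch_model.bin \
#       --metadata_path luke_base/metadata.json \
#       --entity_vocab_path luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base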
import math
def prime_sieve(n: int) -> list:
    """Return all primes below n using an odd-only sieve of Eratosthenes."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
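# Quick check of the sieve on a small bound (illustrative):
#
#   >>> prime_sieve(30)
#   [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
#
# `solution()` then appears to implement Project Euler's "semidivisible numbers":
# between consecutive prime squares it sums the numbers below the limit that are
# divisible by exactly one of lps(n) (largest prime <= sqrt(n)) and ups(n)
# (smallest prime >= sqrt(n)), subtracting those divisible by both.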
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_blip": [
        "BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlipConfig",
        "BlipTextConfig",
        "BlipVisionConfig",
    ],
    "processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
        "BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlipModel",
        "BlipPreTrainedModel",
        "BlipForConditionalGeneration",
        "BlipForQuestionAnswering",
        "BlipVisionModel",
        "BlipTextModel",
        "BlipForImageTextRetrieval",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
        "TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFBlipModel",
        "TFBlipPreTrainedModel",
        "TFBlipForConditionalGeneration",
        "TFBlipForQuestionAnswering",
        "TFBlipVisionModel",
        "TFBlipTextModel",
        "TFBlipForImageTextRetrieval",
    ]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
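# How the lazy module behaves (illustrative): names listed in `_import_structure`
# are only materialized on first attribute access, so something like
#
#   from transformers.models.blip import BlipProcessor
#
# triggers the import of `processing_blip` at that point rather than at package
# import time, keeping `import transformers` cheap.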
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
        num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1,
            new_decoder_architecture=True,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask)
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True)["hidden_states"][0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True)["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase__ = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (FalconForCausalLM,) if is_torch_available() else ()
UpperCamelCase__ = (
{
'''feature-extraction''': FalconModel,
'''text-classification''': FalconForSequenceClassification,
'''text-generation''': FalconForCausalLM,
'''question-answering''': FalconForQuestionAnswering,
'''token-classification''': FalconForTokenClassification,
'''zero-shot''': FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_falcon_alibi(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # 2 tensors per layer: key and value
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
        expected_output = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )
        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]
        self.assertEqual(output_str, expected_output)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)
                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
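# A minimal, self-contained sketch of the cache-equivalence property tested above.
# Illustrative only: the checkpoint is one of the tiny test repos used above, and
# any small causal LM would do.
#
# tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/tiny-random-falcon-7b")
# model = FalconForCausalLM.from_pretrained("Rocketknight1/tiny-random-falcon-7b").eval()
# inputs = tokenizer("My favorite food is", return_tensors="pt")
# with torch.no_grad():
#     fast = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=True)
#     slow = model.generate(**inputs, do_sample=False, max_new_tokens=8, use_cache=False)
# assert torch.equal(fast, slow)  # greedy decoding must not depend on the KV cache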
| 331
|
"""simple docstring"""
from copy import deepcopy
class BinaryIndexedTree:
    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i
if __name__ == "__main__":
import doctest
doctest.testmod()
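    # Usage sketch for the Fenwick tree above (array values chosen arbitrarily):
    bit = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
    assert bit.prefix(3) == 1 + 2 + 3  # sum of the first three elements
    assert bit.query(1, 4) == 2 + 3 + 4  # half-open range sum over [1, 4)
    bit.add(2, 10)  # arr[2] += 10
    assert bit.get(2) == 13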
| 103
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clipseg': [
'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPSegConfig',
'CLIPSegTextConfig',
'CLIPSegVisionConfig',
],
'processing_clipseg': ['CLIPSegProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPSegModel',
'CLIPSegPreTrainedModel',
'CLIPSegTextModel',
'CLIPSegVisionModel',
'CLIPSegForImageSegmentation',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
__a : Union[str, Any] = None
__a : Union[str, Any] = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
__a : List[Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256):
    # Round the SwiGLU hidden size (8n/3, optionally scaled) up to a multiple of `multiple_of`.
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
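# Worked example: for the 7B model, dim = 4096 gives int(8 * 4096 / 3) = 10922,
# which rounds up to the next multiple of 256 as 11008 -- matching the '7B'
# entry in the intermediate-size table above.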
def read_json(path):
    with open(path, "r") as f:
        return json.load(f)


def write_json(text, path):
    with open(path, "w") as f:
        json.dump(text, f)
def write_model(model_path, input_base_path, model_size, safe_serialization=True):
    os.makedirs(model_path, exist_ok=True)
    tmp_model_path = os.path.join(model_path, "tmp")
    os.makedirs(tmp_model_path, exist_ok=True)

    params = read_json(os.path.join(input_base_path, "params.json"))
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["n_layers"]
    n_heads = params["n_heads"]
    n_heads_per_shard = n_heads // num_shards
    dim = params["dim"]
    dims_per_head = dim // n_heads
    base = 10000.0
    inv_freq = 1.0 / (base ** (torch.arange(0, dims_per_head, 2).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["n_kv_heads"]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
    # permute for sliced rotary: reorder interleaved rotary pairs into the
    # block layout expected by the HF rotary embedding implementation
    def permute(w, n_heads=n_heads, dim1=dim, dim2=dim):
        return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2)
    print(f"Fetching all parameters from the checkpoint at {input_base_path}.")
    # Load weights
    if model_size == "7B":
        # Not sharded
        # (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path, "consolidated.00.pth"), map_location="cpu")
    else:
        # Sharded
        loaded = [
            torch.load(os.path.join(input_base_path, f"consolidated.{i:02d}.pth"), map_location="cpu")
            for i in range(num_shards)
        ]
    param_count = 0
    index_dict = {"weight_map": {}}
    for layer_i in range(n_layers):
        filename = f"pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"
        if model_size == "7B":
            # Unsharded
            state_dict = {
                f"model.layers.{layer_i}.self_attn.q_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wq.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.k_proj.weight": permute(
                    loaded[f"layers.{layer_i}.attention.wk.weight"]
                ),
                f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"],
                f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"],
                f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"],
                f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"],
                f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"],
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[f"layers.{layer_i}.attention_norm.weight"],
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[f"layers.{layer_i}.ffn_norm.weight"],
            }
        else:
            # Sharded
            # Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
            # the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
            # redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
                f"model.layers.{layer_i}.input_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.attention_norm.weight"
                ].clone(),
                f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[0][
                    f"layers.{layer_i}.ffn_norm.weight"
                ].clone(),
            }
            state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim)
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(dim, dim)
            )
            state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute(
                torch.cat(
                    [
                        loaded[i][f"layers.{layer_i}.attention.wk.weight"].view(
                            num_local_key_value_heads, dims_per_head, dim
                        )
                        for i in range(num_shards)
                    ],
                    dim=0,
                ).reshape(key_value_dim, dim),
                num_key_value_heads,
                key_value_dim,
                dim,
            )
            state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat(
                [
                    loaded[i][f"layers.{layer_i}.attention.wv.weight"].view(
                        num_local_key_value_heads, dims_per_head, dim
                    )
                    for i in range(num_shards)
                ],
                dim=0,
            ).reshape(key_value_dim, dim)
            state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0
            )
            state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1
            )
            state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat(
                [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0
            )

        state_dict[f"model.layers.{layer_i}.self_attn.rotary_emb.inv_freq"] = inv_freq
        for k, v in state_dict.items():
            index_dict["weight_map"][k] = filename
            param_count += v.numel()
        torch.save(state_dict, os.path.join(tmp_model_path, filename))
    filename = f"pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"
    if model_size == "7B":
        # Unsharded
        state_dict = {
            "model.embed_tokens.weight": loaded["tok_embeddings.weight"],
            "model.norm.weight": loaded["norm.weight"],
            "lm_head.weight": loaded["output.weight"],
        }
    else:
        state_dict = {
            "model.norm.weight": loaded[0]["norm.weight"],
            "model.embed_tokens.weight": torch.cat(
                [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1
            ),
            "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0),
        }
    for k, v in state_dict.items():
        index_dict["weight_map"][k] = filename
        param_count += v.numel()
    torch.save(state_dict, os.path.join(tmp_model_path, filename))
    # Write configs
    index_dict["metadata"] = {"total_size": param_count * 2}
    write_json(index_dict, os.path.join(tmp_model_path, "pytorch_model.bin.index.json"))
    ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
    multiple_of = params["multiple_of"] if "multiple_of" in params else 256
    config = LlamaConfig(
        hidden_size=dim,
        intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of),
        num_attention_heads=params["n_heads"],
        num_hidden_layers=params["n_layers"],
        rms_norm_eps=params["norm_eps"],
        num_key_value_heads=num_key_value_heads,
    )
    config.save_pretrained(tmp_model_path)

    # Make space so we can load the model properly now.
    del state_dict
    del loaded
    gc.collect()

    print("Loading the checkpoint in a Llama model.")
    model = LlamaForCausalLM.from_pretrained(tmp_model_path, torch_dtype=torch.float16, low_cpu_mem_usage=True)
    # Avoid saving this as part of the config.
    del model.config._name_or_path

    print("Saving in the Transformers format.")
    model.save_pretrained(model_path, safe_serialization=safe_serialization)
    shutil.rmtree(tmp_model_path)
def write_tokenizer(tokenizer_path, input_tokenizer_path):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f"Saving a {tokenizer_class.__name__} to {tokenizer_path}.")
    tokenizer = tokenizer_class(input_tokenizer_path)
    tokenizer.save_pretrained(tokenizer_path)


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_dir",
        help="Location of LLaMA weights, which contains tokenizer.model and model folders",
    )
    parser.add_argument(
        "--model_size",
        choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"],
    )
    parser.add_argument(
        "--output_dir",
        help="Location to write HF model and tokenizer",
    )
    parser.add_argument("--safe_serialization", type=bool, help="Whether or not to save using `safetensors`.")
    args = parser.parse_args()
    if args.model_size != "tokenizer_only":
        write_model(
            model_path=args.output_dir,
            input_base_path=os.path.join(args.input_dir, args.model_size),
            model_size=args.model_size,
            safe_serialization=args.safe_serialization,
        )
    spm_path = os.path.join(args.input_dir, "tokenizer.model")
    write_tokenizer(args.output_dir, spm_path)
if __name__ == "__main__":
main()
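# Example invocation (paths are illustrative; the input directory must follow the
# original Meta checkpoint layout with params.json and consolidated.*.pth files):
#   python convert_llama_weights_to_hf.py \
#       --input_dir /path/to/downloaded/llama/weights \
#       --model_size 7B \
#       --output_dir ./llama-7b-hf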
| 200
| 0
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)
class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
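# Migration sketch for the deprecation above (checkpoint name illustrative; the
# image processor is the documented drop-in replacement):
# from transformers import YolosImageProcessor
# processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")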
| 85
|
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    """Numerically stable softmax over the last axis (subtract the row max before exp)."""
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
    """Classify a pair of texts with a sequence-classification model."""

    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)
        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 186
| 0
|
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from diffusers import DiffusionPipeline, ImagePipelineOutput
class lowercase__(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        image = torch.randn(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        image = image.to(self.device)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(model_output, t, image).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
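# Usage sketch for the pipeline above (assumes a compatible unconditional UNet and
# scheduler pair; the checkpoint name is illustrative):
# from diffusers import DDIMScheduler, UNet2DModel
# unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
# pipe = lowercase__(unet=unet, scheduler=DDIMScheduler())
# images = pipe(batch_size=1, num_inference_steps=50).images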
| 708
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
    "tokenizer_file": {
        "mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mobilebert-uncased": 512}

PRETRAINED_INIT_CONFIGURATION = {}
class MobileBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = MobileBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
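# Quick usage sketch for the fast tokenizer above (checkpoint taken from the
# pretrained map above):
# tok = MobileBertTokenizerFast.from_pretrained("google/mobilebert-uncased")
# print(tok("hello world")["input_ids"])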
| 244
| 0
|
'''simple docstring'''
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    """Yield Fibonacci numbers, starting from 1."""
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci number with n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
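# Worked example: solution(3) == 12, since F(12) = 144 is the first
# Fibonacci number with three digits (1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144).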
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 275
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
"configuration_audio_spectrogram_transformer": [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ASTConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
"AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ASTForAudioClassification",
"ASTModel",
"ASTPreTrainedModel",
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 275
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_longformer""": [
"""LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""LongformerConfig""",
"""LongformerOnnxConfig""",
],
"""tokenization_longformer""": ["""LongformerTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_longformer_fast"] = ["LongformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_longformer"] = [
"""LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LongformerForMaskedLM""",
"""LongformerForMultipleChoice""",
"""LongformerForQuestionAnswering""",
"""LongformerForSequenceClassification""",
"""LongformerForTokenClassification""",
"""LongformerModel""",
"""LongformerPreTrainedModel""",
"""LongformerSelfAttention""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_longformer"] = [
"""TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLongformerForMaskedLM""",
"""TFLongformerForMultipleChoice""",
"""TFLongformerForQuestionAnswering""",
"""TFLongformerForSequenceClassification""",
"""TFLongformerForTokenClassification""",
"""TFLongformerModel""",
"""TFLongformerPreTrainedModel""",
"""TFLongformerSelfAttention""",
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 222
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
"""<""": operator.lt,
"""<=""": operator.le,
"""==""": operator.eq,
"""!=""": operator.ne,
""">=""": operator.ge,
""">""": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
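# Usage sketch:
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.13")  # raises ImportError if the pin is not met
# require_version("python>=3.8")
# require_version_core("torch>=1.10")  # same check, with the transformers-specific hint above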
| 222
| 1
|
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 562
|
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask into the additive form the T5 blocks expect
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
| 331
| 0
|
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"Node({self.data})"


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def _snake_case( ) -> None:
'''simple docstring'''
A__ = LinkedList()
assert linked_list.is_empty() is True
assert str(SCREAMING_SNAKE_CASE__ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(SCREAMING_SNAKE_CASE__ ) == i
linked_list.insert_nth(SCREAMING_SNAKE_CASE__ , i + 1 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(SCREAMING_SNAKE_CASE__ ) == 9
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
A__ = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(SCREAMING_SNAKE_CASE__ ) == "->".join(str(SCREAMING_SNAKE_CASE__ ) for i in range(-8 , 1 ) )
def _snake_case( ) -> None:
'''simple docstring'''
A__ = [
-9,
100,
Node(77345112 ),
'dlrow olleH',
7,
5555,
0,
-192.5_5555,
'Hello, world!',
77.9,
Node(10 ),
None,
None,
12.20,
]
A__ = LinkedList()
for i in test_input:
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(SCREAMING_SNAKE_CASE__ ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
A__ = linked_list.delete_head()
assert result == -9
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
A__ = linked_list.delete_tail()
assert result == 12.2
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
A__ = linked_list.delete_nth(10 )
assert result is None
assert (
str(SCREAMING_SNAKE_CASE__ ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(SCREAMING_SNAKE_CASE__ )
assert (
str(SCREAMING_SNAKE_CASE__ )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(SCREAMING_SNAKE_CASE__ )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _snake_case( ) -> Tuple:
'''simple docstring'''
from doctest import testmod
testmod()
A__ = LinkedList()
linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
print('\nPrint list:' )
linked_list.print_list()
print('\nDelete head' )
linked_list.delete_head()
print('Delete tail' )
linked_list.delete_tail()
print('\nPrint list:' )
linked_list.print_list()
print('\nReverse linked list' )
linked_list.reverse()
print('\nPrint list:' )
linked_list.print_list()
print('\nString representation of linked list:' )
print(SCREAMING_SNAKE_CASE__ )
print('\nReading/changing Node data using indexing:' )
print(f'Element at Position 1: {linked_list[1]}' )
A__ = input('Enter New Value: ' ).strip()
print('New list:' )
print(SCREAMING_SNAKE_CASE__ )
print(f'length of linked_list is : {len(SCREAMING_SNAKE_CASE__ )}' )
if __name__ == "__main__":
main()
| 586
|
from math import log2


def lowest_set_bit(number: int) -> int:
    """Return the zero-based index of the lowest set bit of `number`."""
    if not isinstance(number, int):
        raise TypeError("Input value must be a 'int' type")
    if number < 0:
        raise ValueError('Input value must be a positive integer')
    return 0 if number == 0 else int(log2(number & -number))
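# Worked example: lowest_set_bit(12) == 2, since 12 is 0b1100 and its lowest
# set bit sits at zero-based index 2 (12 & -12 == 4, log2(4) == 2).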
if __name__ == "__main__":
import doctest
doctest.testmod()
| 586
| 1
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"
def __init__( self , _lowercase=32 , _lowercase=768 , _lowercase=12 , _lowercase=12 , _lowercase=3_072 , _lowercase=2 , _lowercase=512 , _lowercase=256 , _lowercase=True , _lowercase=True , _lowercase=("p2c", "c2p") , _lowercase="layer_norm" , _lowercase="gelu_python" , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.1 , _lowercase=0.0 , _lowercase=0.1 , _lowercase=0.02 , _lowercase=1e-7 , _lowercase=1e-5 , _lowercase="group" , _lowercase="gelu" , _lowercase=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , _lowercase=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _lowercase=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _lowercase=False , _lowercase=128 , _lowercase=16 , _lowercase=True , _lowercase=0.05 , _lowercase=10 , _lowercase=2 , _lowercase=0.0 , _lowercase=10 , _lowercase=0 , _lowercase="mean" , _lowercase=False , _lowercase=False , _lowercase=256 , _lowercase=0 , _lowercase=1 , _lowercase=2 , **_lowercase , ):
"""simple docstring"""
super().__init__(**_lowercase , pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase )
_lowerCAmelCase = hidden_size
_lowerCAmelCase = feat_extract_norm
_lowerCAmelCase = feat_extract_activation
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = conv_bias
_lowerCAmelCase = num_conv_pos_embeddings
_lowerCAmelCase = num_conv_pos_embedding_groups
_lowerCAmelCase = len(self.conv_dim )
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = squeeze_factor
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = position_buckets
_lowerCAmelCase = share_att_key
_lowerCAmelCase = relative_attention
_lowerCAmelCase = norm_rel_ebd
_lowerCAmelCase = list(_lowercase )
_lowerCAmelCase = hidden_act
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_dropout
_lowerCAmelCase = attention_dropout
_lowerCAmelCase = activation_dropout
_lowerCAmelCase = feat_proj_dropout
_lowerCAmelCase = final_dropout
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = feature_layer_norm_eps
_lowerCAmelCase = initializer_range
_lowerCAmelCase = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
F'but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'
F'= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCAmelCase = apply_spec_augment
_lowerCAmelCase = mask_time_prob
_lowerCAmelCase = mask_time_length
_lowerCAmelCase = mask_time_min_masks
_lowerCAmelCase = mask_feature_prob
_lowerCAmelCase = mask_feature_length
_lowerCAmelCase = mask_feature_min_masks
# ctc loss
_lowerCAmelCase = ctc_loss_reduction
_lowerCAmelCase = ctc_zero_infinity
# sequence classification
_lowerCAmelCase = use_weighted_layer_sum
_lowerCAmelCase = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 5
|
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class lowercase ( unittest.TestCase):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class lowercase ( unittest.TestCase):
'''simple docstring'''
def lowerCamelCase_ ( self : List[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self : List[str] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Dict = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(snake_case )
SCREAMING_SNAKE_CASE : Any = VersatileDiffusionPipeline.from_pretrained(snake_case , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : List[str] = generator.manual_seed(0 )
SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.dual_guided(
prompt='first prompt' , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=2 , output_type='numpy' , ).images
assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass"
def lowerCamelCase_ ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion' , torch_dtype=torch.floataa )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE : Union[str, Any] = 'cyberpunk 2077'
SCREAMING_SNAKE_CASE : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Any = pipe.dual_guided(
prompt=snake_case , image=snake_case , text_to_image_strength=0.75 , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
SCREAMING_SNAKE_CASE : List[Any] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Tuple = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Any = 'A painting of a squirrel eating a burger '
SCREAMING_SNAKE_CASE : Any = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE : Tuple = pipe.text_to_image(
prompt=snake_case , generator=snake_case , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Dict = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
SCREAMING_SNAKE_CASE : Optional[Any] = pipe.image_variation(snake_case , generator=snake_case , output_type='numpy' ).images
SCREAMING_SNAKE_CASE : Optional[int] = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
| 352
| 0
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class __A ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
lowercase__ : List[str] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : List[Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase__ : List[Any] = torch.manual_seed(0 )
lowercase__ : Tuple = pipe.dual_guided(
prompt='''first prompt''' ,image=_snake_case ,text_to_image_strength=0.75 ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_snake_case )
lowercase__ : str = VersatileDiffusionPipeline.from_pretrained(_snake_case ,torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Tuple = generator.manual_seed(0 )
lowercase__ : Tuple = pipe.dual_guided(
prompt='''first prompt''' ,image=_snake_case ,text_to_image_strength=0.75 ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def UpperCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
lowercase__ : int = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.floataa )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
lowercase__ : Any = '''cyberpunk 2077'''
lowercase__ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : str = pipe.dual_guided(
prompt=_snake_case ,image=_snake_case ,text_to_image_strength=0.75 ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ,).images
lowercase__ : int = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : Optional[Any] = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : List[Any] = '''A painting of a squirrel eating a burger '''
lowercase__ : Optional[Any] = torch.manual_seed(0 )
lowercase__ : int = pipe.text_to_image(
prompt=_snake_case ,generator=_snake_case ,guidance_scale=7.5 ,num_inference_steps=50 ,output_type='''numpy''' ).images
lowercase__ : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : List[Any] = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
lowercase__ : List[Any] = pipe.image_variation(_snake_case ,generator=_snake_case ,output_type='''numpy''' ).images
lowercase__ : str = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
lowercase__ : List[str] = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 122
| 1
|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*A_ : int ,**A_ : Union[str, Any] ) -> None:
warnings.warn(
'The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use OwlViTImageProcessor instead.' ,A_ ,)
super().__init__(*A_ ,**A_ )
| 91
|
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU (rectified linear unit) activation element-wise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 352
| 0
|
import itertools
import os
import re


_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}'' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
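# A minimal usage sketch (illustrative, not part of the original module):
# camelcase_to_snakecase("SnakeCase") == "snake_case"                      # True
# snakecase_to_camelcase("snake_case") == "SnakeCase"                      # True
# filename_prefix_for_split("MyDataset", "train") == "my_dataset-train"    # True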
| 703
|
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        # "low" has index 14 and "er</w>" index 15 in the vocab above; "<unk>" is index 20
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
| 216
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation
def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
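# A small check of the rules above (illustrative, not part of the original
# script): the vertical BLINKER becomes a horizontal bar and flips back, so
# applying `new_generation` twice returns the starting pattern:
# new_generation(BLINKER) == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]   # True
# new_generation(new_generation(BLINKER)) == BLINKER             # True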
| 20
|
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
'EncodecConfig': ['overlap'],
# used as `self.bert_model = BertModel(config, ...)`
'DPRConfig': True,
# not used in modeling files, but it's an important information
'FSMTConfig': ['langs'],
# used internally in the configuration class file
'GPTNeoConfig': ['attention_types'],
# used internally in the configuration class file
'EsmConfig': ['is_folding_model'],
# used during training (despite we don't have training script for these models yet)
'Mask2FormerConfig': ['ignore_value'],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
'OneFormerConfig': ['ignore_value', 'norm'],
# used during preprocessing and collation, see `collating_graphormer.py`
'GraphormerConfig': ['spatial_pos_max'],
# used internally in the configuration class file
'T5Config': ['feed_forward_proj'],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'MT5Config': ['feed_forward_proj', 'tokenizer_class'],
'UMT5Config': ['feed_forward_proj', 'tokenizer_class'],
# used internally in the configuration class file
'LongT5Config': ['feed_forward_proj'],
# used internally in the configuration class file
'SwitchTransformersConfig': ['feed_forward_proj'],
# having default values other than `1e-5` - we can't fix them without breaking
'BioGptConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'GLPNConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'SegformerConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'CvtConfig': ['layer_norm_eps'],
# having default values other than `1e-5` - we can't fix them without breaking
'PerceiverConfig': ['layer_norm_eps'],
# used internally to calculate the feature size
'InformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'TimeSeriesTransformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate the feature size
'AutoformerConfig': ['num_static_real_features', 'num_time_features'],
# used internally to calculate `mlp_dim`
'SamVisionConfig': ['mlp_ratio'],
# For (head) training, but so far not implemented
'ClapAudioConfig': ['num_classes'],
# Not used, but providing useful information to users
'SpeechT5HifiGanConfig': ['sampling_rate'],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f"config.{attribute}" in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                "summary_type",
                "summary_use_proj",
                "summary_activation",
                "summary_last_dropout",
                "summary_proj_to_labels",
                "summary_first_dropout",
            ]:
                if "SequenceSummary" in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        "bos_index",
        "eos_index",
        "pad_index",
        "unk_index",
        "mask_index",
        "image_size",
        "use_cache",
        "out_features",
        "out_indices",
    ]
    attributes_used_in_generation = ["encoder_no_repeat_ngram_size"]

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ["is_encoder_decoder"] and default_value is True:
                case_allowed = True
            elif attribute in ["tie_word_embeddings"] and default_value is False:
                case_allowed = True

            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith("_token_id"):
                case_allowed = True

            # configuration class specific cases
            if not case_allowed:
                allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
                case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ["self", "kwargs"]]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith("modeling_")]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = "The following configuration classes contain unused attributes in the corresponding modeling files:\n"
        for name, attributes in configs_with_unused_attributes.items():
            error += f"{name}: {attributes}\n"
        raise ValueError(error)


if __name__ == "__main__":
    check_config_attributes()
| 223
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
    from deepspeed import DeepSpeedEngine

if is_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
def is_compiled_module(module):
    """Check whether the module was compiled with torch.compile()."""
    if is_torch_version("<", "2.0.0") or not hasattr(torch, "_dynamo"):
        return False
    return isinstance(module, torch._dynamo.eval_frame.OptimizedModule)


def extract_model_from_parallel(model, keep_fp32_wrapper: bool = True):
    """Extract a model from its distributed containers."""
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)

    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod

    if is_deepspeed_available():
        options += (DeepSpeedEngine,)

    while isinstance(model, options):
        model = model.module

    if not keep_fp32_wrapper:
        forward = getattr(model, "forward")
        original_forward = model.__dict__.pop("_original_forward", None)
        if original_forward is not None:
            while hasattr(forward, "__wrapped__"):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
        if getattr(model, "_converted_to_transformer_engine", False):
            convert_model(model, to_transformer_engine=False)

    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model

    return model


def wait_for_everyone():
    """Introduce a blocking point in the script, making sure all processes have reached this point."""
    PartialState().wait_for_everyone()


def save(obj, f):
    """Save the data to disk. Use in place of `torch.save()` so only one process writes per node."""
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj, f)
    elif PartialState().local_process_index == 0:
        torch.save(obj, f)


@contextmanager
def patch_environment(**kwargs):
    """A context manager that temporarily sets the given environment variables (upper-cased keys)."""
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)

    yield

    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]


def get_pretty_name(obj):
    """Return a human-readable name for an object, class, or function."""
    if not hasattr(obj, "__qualname__") and not hasattr(obj, "__name__"):
        obj = getattr(obj, "__class__", obj)
    if hasattr(obj, "__qualname__"):
        return obj.__qualname__
    if hasattr(obj, "__name__"):
        return obj.__name__
    return str(obj)


def merge_dicts(source, destination):
    """Recursively merge `source` into `destination`, merging nested dictionaries instead of overwriting them."""
    for key, value in source.items():
        if isinstance(value, dict):
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value

    return destination


def is_port_in_use(port: int = None) -> bool:
    """Check whether a port is already in use on localhost."""
    if port is None:
        port = 29500
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0
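# A small illustrative example (not from the original module) of the
# `merge_dicts` helper above: nested keys are merged rather than overwritten.
# merge_dicts({"a": {"b": 1}}, {"a": {"c": 2}}) == {"a": {"c": 2, "b": 1}}  # True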
| 721
|
from __future__ import annotations

from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class QueueByTwoStacks(Generic[_T]):
    def __init__(self, iterable: Iterable[_T] | None = None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # Cache the bound methods to reduce attribute look-ups in the loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append

        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())

        if not self._stack2:
            raise IndexError("Queue is empty")

        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
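# Usage sketch (illustrative): items come out in FIFO order, and since each
# element crosses between the two stacks at most once, `get` is amortized O(1).
# queue = QueueByTwoStacks([1, 2, 3])
# queue.put(4)
# [queue.get() for _ in range(4)] == [1, 2, 3, 4]  # True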
| 155
| 0
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,  # will be created if not exists
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
SCREAMING_SNAKE_CASE : Dict = parser.parse_args()
SCREAMING_SNAKE_CASE : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
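# Example invocation (illustrative; the script name and model identifiers are
# assumptions, chosen as the usual RAG building blocks, not mandated here):
#   python consolidate_rag_checkpoint.py --model_type rag_sequence --dest ./rag \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base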
| 89
|
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num! (num factorial)."""
    return sum(int(x) for x in str(factorial(num)))


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
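# Worked example (illustrative): 10! = 3628800 and 3+6+2+8+8+0+0 = 27,
# so solution(10) == 27.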
| 51
| 0
|
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """
    Return a mapping from utf-8 bytes to unicode strings, avoiding whitespace/control
    characters that the BPE code would choke on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


# Copied from transformers.models.bart.tokenization_bart.get_pairs
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of variable-length strings)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
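# Illustrative example (not in the original file): `get_pairs` produces the
# adjacent-symbol pairs that the BPE loop below ranks and merges.
# get_pairs(("l", "o", "w")) == {("l", "o"), ("o", "w")}  # True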
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
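# Usage sketch (illustrative; assumes network access to the Hub):
# tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
# batch = tokenizer(["Summarize this document."], return_tensors="pt")
# Note that `_pad` above pads `global_attention_mask` with -1, since 0 already
# means "local attention" for LED rather than "do not attend".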
| 37
|
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
| 37
| 1
|
"""simple docstring"""
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
__UpperCamelCase = ['image_processor', 'tokenizer']
__UpperCamelCase = 'BridgeTowerImageProcessor'
__UpperCamelCase = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self : Any , lowercase_ : List[Any] , lowercase_ : int):
'''simple docstring'''
super().__init__(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def __call__( self : Dict , lowercase_ : List[str] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : int , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__ , padding=SCREAMING_SNAKE_CASE__ , truncation=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ , stride=SCREAMING_SNAKE_CASE__ , pad_to_multiple_of=SCREAMING_SNAKE_CASE__ , return_token_type_ids=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , return_overflowing_tokens=SCREAMING_SNAKE_CASE__ , return_special_tokens_mask=SCREAMING_SNAKE_CASE__ , return_offsets_mapping=SCREAMING_SNAKE_CASE__ , return_length=SCREAMING_SNAKE_CASE__ , verbose=SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# add pixel_values + pixel_mask
SCREAMING_SNAKE_CASE_ : str = self.image_processor(
SCREAMING_SNAKE_CASE__ , return_tensors=SCREAMING_SNAKE_CASE__ , do_normalize=SCREAMING_SNAKE_CASE__ , do_center_crop=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
encoding.update(SCREAMING_SNAKE_CASE__)
return encoding
def _SCREAMING_SNAKE_CASE ( self : List[str] , *lowercase_ : Optional[int] , **lowercase_ : Any):
'''simple docstring'''
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
def _SCREAMING_SNAKE_CASE ( self : str , *lowercase_ : int , **lowercase_ : List[str]):
'''simple docstring'''
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__)
@property
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE_ : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
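# Usage sketch (illustrative; the checkpoint name is an example, not from this file):
# processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
# inputs = processor(images=image, text="a photo of a cat", return_tensors="pt")
# `inputs` then carries both the tokenizer fields and pixel_values/pixel_mask.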
| 512
|
"""simple docstring"""
def snake_case ( _a: int , _a: int )-> int:
'''simple docstring'''
lowerCamelCase__ = 1 # To kept the Calculated Value
# Since C(n, k) = C(n, n-k)
if k > (n - k):
lowerCamelCase__ = n - k
# Calculate C(n,k)
for i in range(_a ):
result *= n - i
result //= i + 1
return result
def snake_case ( _a: int )-> int:
'''simple docstring'''
return binomial_coefficient(2 * node_count , _a ) // (node_count + 1)
def snake_case ( _a: int )-> int:
'''simple docstring'''
if n < 0:
raise ValueError('factorial() not defined for negative values' )
lowerCamelCase__ = 1
for i in range(1 , n + 1 ):
result *= i
return result
def snake_case ( _a: int )-> int:
'''simple docstring'''
return catalan_number(_a ) * factorial(_a )
if __name__ == "__main__":
_snake_case = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f"""Given {node_count} nodes, there are {binary_tree_count(node_count)} """
f"""binary trees and {catalan_number(node_count)} binary search trees."""
)
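# Worked example (illustrative): for 5 nodes, catalan_number(5) = C(10, 5) // 6
# = 252 // 6 = 42 binary search trees, and binary_tree_count(5) = 42 * 5! = 5040.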
| 510
| 0
|
"""Convert Swin checkpoints from the timm library."""

import argparse
import json

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def get_swin_config(swin_name):
    config = SwinConfig()
    name_split = swin_name.split("_")

    model_size = name_split[1]
    img_size = int(name_split[4])
    window_size = int(name_split[3][-1])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "in22k" in swin_name:
        num_classes = 21841
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swin." + name

    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # The fused timm qkv projection is split into separate query/key/value tensors.
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swin_checkpoint(swin_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swin_name, pretrained=True)
    timm_model.eval()

    config = get_swin_config(swin_name)
    model = SwinForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swin_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swin_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
a_ = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
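# Example invocation (illustrative; the script name is an assumption):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224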
| 713
|
from ..utils import DummyObject, requires_backends


class MidiProcessor(metaclass=DummyObject):
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
| 286
| 0
|
'''simple docstring'''
import math
def insertion_sort(array, start=0, end=0):
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    n = len(array)
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)
    return array


def median_of_3(array, first_index, middle_index, last_index):
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
import doctest
doctest.testmod()
user_input = input("Enter numbers separated by a comma : ").strip()
unsorted = [float(item) for item in user_input.split(",")]
print(sort(unsorted))
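# Added sanity-check sketch (not in the original module): sort() works in
# place and also returns the list, and falls back to insertion sort below the
# size threshold of 16.
sample = [5, -1, 3, 3, 0, 12, -7]
assert sort(sample) == [-7, -1, 0, 3, 3, 5, 12]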
| 69
|
'''simple docstring'''
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts


class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng: jax.random.KeyArray, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
| 459
| 0
|
"""simple docstring"""
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    def test_glue(self):
        # create and fit the estimator
        estimator = self.create_estimator()
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)
        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 635
| 0
|
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def __lowerCamelCase ( ) -> Dict:
raise RuntimeError("""CUDA out of memory.""" )
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
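# Added usage sketch (not part of the test suite above): in application code
# the decorator wraps a training function and retries it, halving `batch_size`
# after every CUDA OOM. `build_dataloader` and `train_one_epoch` are
# hypothetical helpers; only `find_executable_batch_size` comes from accelerate.
@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    dataloader = build_dataloader(batch_size)  # hypothetical helper
    train_one_epoch(dataloader)  # hypothetical helper
# train()  # would halve batch_size on each OOM until an epoch fits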
| 369
|
'''simple docstring'''
from manim import *
class _lowerCAmelCase(Scene):
    """Manim scene animating how checkpoint shards are loaded across CPU, GPU, and disk."""

    def construct(self):
snake_case = Rectangle(height=0.5 , width=0.5 )
snake_case = Rectangle(height=0.25 , width=0.25 )
snake_case = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
snake_case = [mem.copy() for i in range(6 )]
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""CPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(4 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""GPU""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Model""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
snake_case = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
model_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case , *__snake_case )
snake_case = [mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Loaded Checkpoint""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
checkpoint.move_to([3, 0.5, 0] )
self.add(__snake_case )
snake_case = []
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = fill.copy().set_fill(__snake_case , opacity=0.7 )
target.move_to(__snake_case )
ckpt_arr.append(__snake_case )
snake_case = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__snake_case )
self.add(*__snake_case , *__snake_case )
snake_case = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
snake_case = MarkupText(
f'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
snake_case = MarkupText(
f'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__snake_case )
snake_case = MarkupText(
f'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = [meta_mem.copy() for i in range(6 )]
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
snake_case = VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
snake_case = Text("""Disk""" , font_size=24 )
snake_case = Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(__snake_case , run_time=3 ) , Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
snake_case = []
for i, rect in enumerate(__snake_case ):
snake_case = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(FadeOut(__snake_case ) )
snake_case = MarkupText(f'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case , run_time=3 ) )
self.play(
FadeOut(__snake_case , __snake_case , *__snake_case , *__snake_case ) , )
self.wait()
| 369
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])
def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    """Merge two sorted linked lists into a single sorted linked list."""
    return SortedLinkedList(list(sll_one) + list(sll_two))
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
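# Added example output (not in the original): with the sample tuples above,
# the printed merge is
# -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10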
| 8
|
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve for whichever of the four quantities is passed as 0, using
    Coulomb's law F = k * |q1 * q2| / d**2."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
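# Added usage sketch (SI units assumed: newtons, coulombs, metres). Passing 0
# for one quantity asks the function to solve for it:
print(coulombs_law(force=0, charge1=3, charge2=5, distance=2000))
# -> {'force': 33705.0}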
| 8
| 1
|
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=0.2 , UpperCamelCase__=0.2 ) -> Any:
lowerCamelCase : Optional[Any] = bp_numa
lowerCamelCase : Union[str, Any] = bp_numa
lowerCamelCase : Any = bp_numa
lowerCamelCase : Optional[Any] = conva_get[:2]
lowerCamelCase : Any = conva_get[2]
lowerCamelCase : List[str] = size_pa
lowerCamelCase : Union[str, Any] = rate_w
lowerCamelCase : str = rate_t
lowerCamelCase : int = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
lowerCamelCase : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
lowerCamelCase : List[str] = -2 * np.random.rand(self.conva[1] ) + 1
lowerCamelCase : Dict = -2 * np.random.rand(self.num_bpa ) + 1
lowerCamelCase : List[Any] = -2 * np.random.rand(self.num_bpa ) + 1
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
# save model dict with pickle
lowerCamelCase : Union[str, Any] = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(UpperCamelCase__ , "wb" ) as f:
pickle.dump(UpperCamelCase__ , UpperCamelCase__ )
print(F'''Model saved: {save_path}''' )
@classmethod
def _lowercase ( cls , UpperCamelCase__ ) -> Any:
# read saved model
with open(UpperCamelCase__ , "rb" ) as f:
lowerCamelCase : Any = pickle.load(UpperCamelCase__ ) # noqa: S301
lowerCamelCase : Dict = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
lowerCamelCase : Tuple = model_dic.get("size_pooling1" )
lowerCamelCase : List[str] = model_dic.get("num_bp1" )
lowerCamelCase : Union[str, Any] = model_dic.get("num_bp2" )
lowerCamelCase : Optional[int] = model_dic.get("num_bp3" )
lowerCamelCase : str = model_dic.get("rate_weight" )
lowerCamelCase : List[Any] = model_dic.get("rate_thre" )
# create model instance
lowerCamelCase : str = CNN(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# modify model parameter
lowerCamelCase : int = model_dic.get("w_conv1" )
lowerCamelCase : List[str] = model_dic.get("wkj" )
lowerCamelCase : int = model_dic.get("vji" )
lowerCamelCase : int = model_dic.get("thre_conv1" )
lowerCamelCase : Tuple = model_dic.get("thre_bp2" )
lowerCamelCase : Tuple = model_dic.get("thre_bp3" )
return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[str]:
# convolution process
lowerCamelCase : Optional[Any] = convs[0]
lowerCamelCase : Union[str, Any] = convs[1]
lowerCamelCase : Union[str, Any] = np.shape(UpperCamelCase__ )[0]
# get the data slice of original image data, data_focus
lowerCamelCase : List[Any] = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCamelCase__ ):
lowerCamelCase : Optional[int] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCamelCase__ )
# calculate the feature map of every single kernel, and saved as list of matrix
lowerCamelCase : int = []
lowerCamelCase : List[Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(UpperCamelCase__ ):
lowerCamelCase : Dict = []
for i_focus in range(len(UpperCamelCase__ ) ):
lowerCamelCase : int = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCamelCase__ ) )
lowerCamelCase : Optional[int] = np.asmatrix(UpperCamelCase__ ).reshape(
UpperCamelCase__ , UpperCamelCase__ )
data_featuremap.append(UpperCamelCase__ )
# expanding the data slice to One dimenssion
lowerCamelCase : Tuple = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCamelCase__ ) )
lowerCamelCase : Union[str, Any] = np.asarray(UpperCamelCase__ )
return focus_list, data_featuremap
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__="average_pool" ) -> Tuple:
# pooling process
lowerCamelCase : Union[str, Any] = len(featuremaps[0] )
lowerCamelCase : List[Any] = int(size_map / size_pooling )
lowerCamelCase : Optional[int] = []
for i_map in range(len(UpperCamelCase__ ) ):
lowerCamelCase : str = featuremaps[i_map]
lowerCamelCase : Dict = []
for i_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j_focus in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : str = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCamelCase__ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCamelCase__ ) )
lowerCamelCase : Union[str, Any] = np.asmatrix(UpperCamelCase__ ).reshape(UpperCamelCase__ , UpperCamelCase__ )
featuremap_pooled.append(UpperCamelCase__ )
return featuremap_pooled
def _lowercase ( self , UpperCamelCase__ ) -> str:
# expanding three dimension data to one dimension list
lowerCamelCase : List[str] = []
for i in range(len(UpperCamelCase__ ) ):
lowerCamelCase : Optional[int] = np.shape(data[i] )
lowerCamelCase : str = data[i].reshape(1 , shapes[0] * shapes[1] )
lowerCamelCase : Optional[int] = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCamelCase__ )
lowerCamelCase : Dict = np.asarray(UpperCamelCase__ )
return data_expanded
def _lowercase ( self , UpperCamelCase__ ) -> List[Any]:
# expanding matrix to one dimension list
lowerCamelCase : Tuple = np.asarray(UpperCamelCase__ )
lowerCamelCase : List[Any] = np.shape(UpperCamelCase__ )
lowerCamelCase : Dict = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
lowerCamelCase : List[str] = []
lowerCamelCase : List[str] = 0
for i_map in range(UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = np.ones((size_map, size_map) )
for i in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
for j in range(0 , UpperCamelCase__ , UpperCamelCase__ ):
lowerCamelCase : Union[str, Any] = pd_pool[
i_pool
]
lowerCamelCase : Optional[int] = i_pool + 1
lowerCamelCase : Optional[int] = np.multiply(
UpperCamelCase__ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(UpperCamelCase__ )
return pd_all
def _lowercase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=bool ) -> Tuple:
# model traning
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(UpperCamelCase__ )) )
print((" - - Shape: Teach_Data ", np.shape(UpperCamelCase__ )) )
lowerCamelCase : Dict = 0
lowerCamelCase : Dict = []
lowerCamelCase : Any = 1_0000
while rp < n_repeat and mse >= error_accuracy:
lowerCamelCase : Union[str, Any] = 0
print(F'''-------------Learning Time {rp}--------------''' )
for p in range(len(UpperCamelCase__ ) ):
# print('------------Learning Image: %d--------------'%p)
lowerCamelCase : Any = np.asmatrix(datas_train[p] )
lowerCamelCase : List[str] = np.asarray(datas_teach[p] )
lowerCamelCase , lowerCamelCase : Union[str, Any] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : List[Any] = self.pooling(UpperCamelCase__ , self.size_poolinga )
lowerCamelCase : int = np.shape(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = self._expand(UpperCamelCase__ )
lowerCamelCase : List[str] = data_bp_input
lowerCamelCase : List[Any] = np.dot(UpperCamelCase__ , self.vji.T ) - self.thre_bpa
lowerCamelCase : Any = self.sig(UpperCamelCase__ )
lowerCamelCase : Optional[Any] = np.dot(UpperCamelCase__ , self.wkj.T ) - self.thre_bpa
lowerCamelCase : int = self.sig(UpperCamelCase__ )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
lowerCamelCase : Optional[int] = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
lowerCamelCase : Union[str, Any] = np.multiply(
np.dot(UpperCamelCase__ , self.wkj ) , np.multiply(UpperCamelCase__ , (1 - bp_outa) ) )
lowerCamelCase : Optional[Any] = np.dot(UpperCamelCase__ , self.vji )
lowerCamelCase : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
lowerCamelCase : Union[str, Any] = pd_conva_pooled.T.getA().tolist()
lowerCamelCase : str = self._calculate_gradient_from_pool(
UpperCamelCase__ , UpperCamelCase__ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
lowerCamelCase : Any = self._expand_mat(pd_conva_all[k_conv] )
lowerCamelCase : Any = self.rate_weight * np.dot(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : Optional[int] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
lowerCamelCase : int = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
lowerCamelCase : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
lowerCamelCase : Union[str, Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
lowerCamelCase : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
lowerCamelCase : int = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
lowerCamelCase : str = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
lowerCamelCase : List[Any] = rp + 1
lowerCamelCase : Any = error_count / patterns
all_mse.append(UpperCamelCase__ )
def draw_error():
lowerCamelCase : Optional[int] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(UpperCamelCase__ , "+-" )
plt.plot(UpperCamelCase__ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(UpperCamelCase__ , alpha=0.5 )
plt.show()
print("------------------Training Complished---------------------" )
print((" - - Training epoch: ", rp, F''' - - Mse: {mse:.6f}''') )
if draw_e:
draw_error()
return mse
def _lowercase ( self , UpperCamelCase__ ) -> int:
# model predict
lowerCamelCase : str = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(UpperCamelCase__ )) )
for p in range(len(UpperCamelCase__ ) ):
lowerCamelCase : List[Any] = np.asmatrix(datas_test[p] )
lowerCamelCase , lowerCamelCase : List[str] = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : List[str] = self.pooling(UpperCamelCase__ , self.size_poolinga )
lowerCamelCase : Union[str, Any] = self._expand(UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = data_bp_input
lowerCamelCase : Any = bp_outa * self.vji.T - self.thre_bpa
lowerCamelCase : List[Any] = self.sig(UpperCamelCase__ )
lowerCamelCase : Dict = bp_outa * self.wkj.T - self.thre_bpa
lowerCamelCase : Optional[Any] = self.sig(UpperCamelCase__ )
produce_out.extend(bp_outa.getA().tolist() )
lowerCamelCase : Dict = [list(map(self.do_round , UpperCamelCase__ ) ) for each in produce_out]
return np.asarray(UpperCamelCase__ )
def _lowercase ( self , UpperCamelCase__ ) -> List[Any]:
# return the data of image after convoluting process so we can check it out
lowerCamelCase : Any = np.asmatrix(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Tuple = self.convolute(
UpperCamelCase__ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
lowerCamelCase : Union[str, Any] = self.pooling(UpperCamelCase__ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 311
|
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    x_min = min(data)
    x_max = max(data)
    # normalize data using min-max scaling into [0, 1]
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    mu = mean(data)
    sigma = stdev(data)
    # standardize data to zero mean and unit variance (z-scores)
    return [round((x - mu) / (sigma), ndigits) for x in data]
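# Added usage sketch (not in the original module): both helpers return a new
# rounded list and leave the input untouched.
print(normalization([2, 7, 10, 20, 30, 50]))  # min-max scaled into [0, 1]
print(standardization([2, 7, 10, 20, 30, 50]))  # z-scores: zero mean, unit stdev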
| 311
| 1
|
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
logger = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    """Handles arguments for zero-shot text classification by turning each candidate label into an NLI
    premise/hypothesis pair."""

    def _parse_labels(self, labels):
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(",") if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError("You must include at least one label and at least one sequence.")
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    "Make sure the passed template includes formatting syntax such as {{}} where the label should go."
                ).format(hypothesis_template)
            )

        if isinstance(sequences, str):
            sequences = [sequences]

        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])

        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    """NLI-based zero-shot classification pipeline: each candidate label is scored as an entailment hypothesis."""
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
                "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs."
            )
    @property
    def entailment_id(self):
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith("entail"):
                return ind
        return -1
    def _parse_and_tokenize(
        self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs
    ):
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                "Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
                " `pad_token=eos_token`"
            )
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs,
                add_special_tokens=add_special_tokens,
                return_tensors=return_tensors,
                padding=padding,
                truncation=truncation,
            )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs,
                    add_special_tokens=add_special_tokens,
                    return_tensors=return_tensors,
                    padding=padding,
                    truncation=TruncationStrategy.DO_NOT_TRUNCATE,
                )
            else:
                raise e

        return inputs
    def _sanitize_parameters(self, **kwargs):
        if kwargs.get("multi_class", None) is not None:
            kwargs["multi_label"] = kwargs["multi_class"]
            logger.warning(
                "The `multi_class` argument has been deprecated and renamed to `multi_label`. "
                "`multi_class` will be removed in a future version of Transformers."
            )
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = self._args_parser._parse_labels(kwargs["candidate_labels"])
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params["multi_label"] = kwargs["multi_label"]
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs["candidate_labels"] = args[0]
        else:
            raise ValueError(f"Unable to understand extra arguments {args}")

        return super().__call__(sequences, **kwargs)
    def preprocess(self, inputs, candidate_labels=None, hypothesis_template="This example is {}."):
        sequence_pairs, sequences = self._args_parser(inputs, candidate_labels, hypothesis_template)

        for i, (candidate_label, sequence_pair) in enumerate(zip(candidate_labels, sequence_pairs)):
            model_input = self._parse_and_tokenize([sequence_pair])

            yield {
                "candidate_label": candidate_label,
                "sequence": sequences[0],
                "is_last": i == len(candidate_labels) - 1,
                **model_input,
            }
    def _forward(self, inputs):
        candidate_label = inputs["candidate_label"]
        sequence = inputs["sequence"]
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)

        model_outputs = {
            "candidate_label": candidate_label,
            "sequence": sequence,
            "is_last": inputs["is_last"],
            **outputs,
        }
        return model_outputs
    def postprocess(self, model_outputs, multi_label=False):
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs])
        N = logits.shape[0]
        n = len(candidate_labels)
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1))

        if multi_label or len(candidate_labels) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True)
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True)

        top_inds = list(reversed(scores[0].argsort()))
        return {
            "sequence": sequences[0],
            "labels": [candidate_labels[i] for i in top_inds],
            "scores": scores[0, top_inds].tolist(),
        }
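# Added usage sketch (not part of the pipeline module): this class is what
# `pipeline("zero-shot-classification")` returns, e.g.
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#     )
#
# The result dict holds the sequence plus labels and scores sorted from the
# most to the least likely label.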
| 707
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta"] = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 413
| 0
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2_048,
}
class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 420
|
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
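# Added usage sketch (not part of the original module): a composite config is
# normally built from two sub-configs via the classmethod above; BertConfig is
# used here purely for illustration.
#
#     from transformers import BertConfig, EncoderDecoderConfig
#
#     config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), BertConfig())
#     assert config.decoder.is_decoder and config.decoder.add_cross_attention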
| 420
| 1
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVision2Seq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageCaptioningTool(PipelineTool):
    default_checkpoint = "Salesforce/blip-image-captioning-base"
    description = (
        "This is a tool that generates a description of an image. It takes an input named `image` which should be the "
        "image to caption, and returns a text that contains the description in English."
    )
    name = "image_captioner"
    model_class = AutoModelForVision2Seq

    inputs = ["image"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image"):
        return self.pre_processor(images=image, return_tensors="pt")

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0].strip()
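# Added usage sketch (not part of the original tool): "cats.png" is a
# hypothetical local file; the BLIP checkpoint is downloaded on first use.
#
#     from PIL import Image
#
#     captioner = ImageCaptioningTool()
#     print(captioner(Image.open("cats.png")))
#
# encode/forward/decode above map to the processor call, `model.generate`,
# and `batch_decode` respectively.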
| 715
|
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)
class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        # one-hot encode the labels, then compute -sum(labels * log_softmax(logits))
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
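# Shape sketch (illustrative): with batch size B and sequence length S,
# start/end logits are (B, S) and pooled_logits is (B, 5), one column per
# Natural Questions answer category, so each term above reduces to a scalar via jnp.mean.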
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
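# Design note: padding every example to a fixed max_length (rather than padding
# dynamically per batch) keeps tensor shapes static, which avoids repeated XLA
# recompilation on TPUs.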
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits,
            start_labels,
            end_logits,
            end_labels,
            pooled_logits,
            pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")
    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
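# Note: under jax.pmap every argument carries a leading device axis; the
# jax.lax.pmean calls above average the loss and the gradients across devices
# along the "batch" axis before the optimizer update is applied.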
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
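# The schedule above warms up linearly from init_lr to lr over warmup_steps,
# then decays linearly toward ~0 (1e-7) over the remaining training steps.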
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # flattened keys are path tuples; skip weight decay for biases and LayerNorm scales
        mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_xlm_roberta_xl''': [
'''XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''XLMRobertaXLConfig''',
'''XLMRobertaXLOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
'''XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XLMRobertaXLForCausalLM''',
'''XLMRobertaXLForMaskedLM''',
'''XLMRobertaXLForMultipleChoice''',
'''XLMRobertaXLForQuestionAnswering''',
'''XLMRobertaXLForSequenceClassification''',
'''XLMRobertaXLForTokenClassification''',
'''XLMRobertaXLModel''',
'''XLMRobertaXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: returns all primes up to and including num.
    """
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
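# Quick check (illustrative): prime_sieve(10) should return [2, 3, 5, 7].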
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )

    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
        "self_attn": "ngram_self_attn",
        "cross_attn": "encoder_attn",
        "cross_attn_layer_norm": "encoder_attn_layer_norm",
        "feed_forward_layer_norm": "final_layer_norm",
        "feed_forward": "",
        "intermediate": "fc1",
        "output": "fc2",
        "key_proj": "k_proj",
        "query_proj": "q_proj",
        "value_proj": "v_proj",
        "word_embeddings": "embed_tokens",
        "embeddings_layer_norm": "emb_layer_norm",
        "relative_pos_embeddings": "relative_linear",
        "ngram_embeddings": "ngram_input_embed",
        "position_embeddings": "embed_positions",
    }

    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])
                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])
                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])
                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--prophetnet_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Virtual filesystem over the files of a Hub repository (legacy interface)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(
        self,
        path: str,
        mode: str = "rb",
        **kwargs,
    ):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
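# Illustrative usage (assumes a populated DatasetInfo): HfFileSystem(repo_info=info).ls("")
# lists the top-level files and directories cached by _get_dirs().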
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""RemBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"""REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RemBertForCausalLM""",
"""RemBertForMaskedLM""",
"""RemBertForMultipleChoice""",
"""RemBertForQuestionAnswering""",
"""RemBertForSequenceClassification""",
"""RemBertForTokenClassification""",
"""RemBertLayer""",
"""RemBertModel""",
"""RemBertPreTrainedModel""",
"""load_tf_weights_in_rembert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"""TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRemBertForCausalLM""",
"""TFRemBertForMaskedLM""",
"""TFRemBertForMultipleChoice""",
"""TFRemBertForQuestionAnswering""",
"""TFRemBertForSequenceClassification""",
"""TFRemBertForTokenClassification""",
"""TFRemBertLayer""",
"""TFRemBertModel""",
"""TFRemBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        error_message = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(error_message)
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
from __future__ import annotations

from collections.abc import Callable
from typing import Any, Generic, TypeVar

T = TypeVar("T")


class SegmentTree(Generic[T]):
    def __init__(self, arr: list[T], fnc: Callable[[T, T], T]) -> None:
        """
        Segment tree constructor; works with any commutative combiner function.
        """
        any_type: Any | T = None

        self.N: int = len(arr)
        self.st: list[T] = [any_type for _ in range(self.N)] + arr
        self.fn = fnc
        self.build()

    def build(self) -> None:
        for p in range(self.N - 1, 0, -1):
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def update(self, p: int, v: T) -> None:
        """
        Update an element at index p in O(log N) time.
        """
        p += self.N
        self.st[p] = v
        while p > 1:
            p = p // 2
            self.st[p] = self.fn(self.st[p * 2], self.st[p * 2 + 1])

    def query(self, l: int, r: int) -> T | None:  # noqa: E741
        """
        Get the combined value over [l, r] in O(log N) time.
        """
        l, r = l + self.N, r + self.N

        res: T | None = None
        while l <= r:
            if l % 2 == 1:
                res = self.st[l] if res is None else self.fn(res, self.st[l])
            if r % 2 == 0:
                res = self.st[r] if res is None else self.fn(res, self.st[r])
            l, r = (l + 1) // 2, (r - 1) // 2
        return res


if __name__ == "__main__":
    from functools import reduce

    test_array = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]

    test_updates = {
        0: 7,
        1: 2,
        2: 6,
        3: -14,
        4: 5,
        5: 4,
        6: 7,
        7: -10,
        8: 9,
        9: 10,
        10: 12,
        11: 1,
    }

    min_segment_tree = SegmentTree(test_array, min)
    max_segment_tree = SegmentTree(test_array, max)
    sum_segment_tree = SegmentTree(test_array, lambda a, b: a + b)

    def test_all_segments() -> None:
        """
        Test all possible segments against a direct reduce over the array.
        """
        for i in range(len(test_array)):
            for j in range(i, len(test_array)):
                min_range = reduce(min, test_array[i : j + 1])
                max_range = reduce(max, test_array[i : j + 1])
                sum_range = reduce(lambda a, b: a + b, test_array[i : j + 1])
                assert min_range == min_segment_tree.query(i, j)
                assert max_range == max_segment_tree.query(i, j)
                assert sum_range == sum_segment_tree.query(i, j)

    test_all_segments()

    for index, value in test_updates.items():
        test_array[index] = value
        min_segment_tree.update(index, value)
        max_segment_tree.update(index, value)
        sum_segment_tree.update(index, value)
    test_all_segments()
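# Complexity note: build() is O(N); update() and query() are O(log N). Leaves
# live at st[N:] and internal node p combines its children st[2p] and st[2p+1].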
"""simple docstring"""
import argparse
import importlib
from pathlib import Path
# Test all the extensions added in the setup
FILES_TO_FIND = [
"kernels/rwkv/wkv_cuda.cu",
"kernels/rwkv/wkv_op.cpp",
"kernels/deformable_detr/ms_deform_attn.h",
"kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
"models/graphormer/algos_graphormer.pyx",
]
def test_custom_files_are_present(transformers_path):
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
else:
        transformers_path = Path.cwd() / "build/lib/transformers"
if not test_custom_files_are_present(transformers_path):
raise ValueError("The built release does not contain the custom files. Fix this before going further!")
"""simple docstring"""
import os
def solution():
    """
    Finds the maximum total in a triangle by accumulating, row by row, the best
    path sum ending at each position.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    a = []
    for line in triangle:
        numbers_from_line = []
        for number in line.strip().split(" "):
            numbers_from_line.append(int(number))
        a.append(numbers_from_line)

    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])
if __name__ == "__main__":
print(solution())
from __future__ import annotations

from math import pow, sqrt


def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(config_path, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config,  'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    # remove unused keys (e.g.: seg_head.aux_head)
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
def get_min_or_max(min_val: int = 10, max_val: int = 1_000, option: bool = True) -> int:
    # Helper returning one of the validated bounds; the original name was lost
    # in obfuscation, so `get_min_or_max` is a descriptive reconstruction.
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
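# For example, guess_the_number(10, 1000, 17) narrows via the midpoints
# 505, 257, 133, 71, 40, 25 and stops at 17.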
def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swin"] = [
"""SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwinForImageClassification""",
"""SwinForMaskedImageModeling""",
"""SwinModel""",
"""SwinPreTrainedModel""",
"""SwinBackbone""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_swin"] = [
"""TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSwinForImageClassification""",
"""TFSwinForMaskedImageModeling""",
"""TFSwinModel""",
"""TFSwinPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from typing import Any
import numpy as np
def is_hermitian(matrix: np.ndarray) -> bool:
    """Checks if a matrix is Hermitian."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Returns the Rayleigh quotient of a Hermitian matrix `a` and vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f"{a} is not hermitian."
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f"{a} is not hermitian."
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
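# Math note: for Hermitian A, R(A, v) = (v* A v) / (v* v) is always real and lies
# between the smallest and largest eigenvalues of A.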
import random


def _partition(data: list, pivot) -> tuple:
    """
    Three-way partition of the data into lists that are smaller than, equal to,
    and greater than the pivot.
    """
    less, equal, greater = [], [], []
    for element in data:
        if element < pivot:
            less.append(element)
        elif element > pivot:
            greater.append(element)
        else:
            equal.append(element)
    return less, equal, greater


def quick_select(items: list, index: int):
    """
    Selects the index-th smallest element from `items` in expected linear time.
    """
    if index >= len(items) or index < 0:
        return None

    pivot = items[random.randint(0, len(items) - 1)]
    smaller, equal, larger = _partition(items, pivot)
    count = len(equal)
    m = len(smaller)

    # index is the pivot
    if m <= index < m + count:
        return pivot
    # must be in smaller
    elif m > index:
        return quick_select(smaller, index)
    # must be in larger
    else:
        return quick_select(larger, index - (m + count))
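# e.g. quick_select([2, 4, 5, 7, 899, 54, 32], 5) == 54 (the 6th smallest element).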
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class SequenceFeatureExtractionTestMixin(FeatureExtractionSavingTestMixin):
    # to overwrite in feature-extractor-specific tests
    feat_extract_tester = None
    feature_extraction_class = None
@property
    def feat_extract_dict(self):
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def test_feat_extract_common_properties(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feat_extract, "feature_size"))
        self.assertTrue(hasattr(feat_extract, "sampling_rate"))
        self.assertTrue(hasattr(feat_extract, "padding_value"))

    def test_batch_feature(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        self.assertTrue(all(len(x) == len(y) for x, y in zip(speech_inputs, processed_features[input_name])))

        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="np")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_torch
    def test_batch_feature_pt(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="pt")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
@require_tf
    def test_batch_feature_tf(self):
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(equal_length=True)
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs}, tensor_type="tf")

        batch_features_input = processed_features[input_name]

        if len(batch_features_input.shape) < 3:
            batch_features_input = batch_features_input[:, :, None]

        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0]), self.feat_extract_tester.feature_size)
        )
    def _check_padding(self, numpify=False):
        def _inputs_have_equal_length(input):
            length = len(input[0])
            for input_slice in input[1:]:
                if len(input_slice) != length:
                    return False
            return True

        def _inputs_are_equal(input_1, input_2):
            if len(input_1) != len(input_2):
                return False

            for input_slice_1, input_slice_2 in zip(input_1, input_2):
                if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                    return False
            return True

        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
        input_name = feat_extract.model_input_names[0]

        processed_features = BatchFeature({input_name: speech_inputs})

        pad_diff = self.feat_extract_tester.seq_length_diff
        pad_max_length = self.feat_extract_tester.max_seq_length + pad_diff
        pad_min_length = self.feat_extract_tester.min_seq_length
        batch_size = self.feat_extract_tester.batch_size
        feature_size = self.feat_extract_tester.feature_size

        # test padding for List[int] + numpy
        input_1 = feat_extract.pad(processed_features, padding=False)
        input_1 = input_1[input_name]

        input_2 = feat_extract.pad(processed_features, padding="longest")
        input_2 = input_2[input_name]

        input_3 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[-1]))
        input_3 = input_3[input_name]

        input_4 = feat_extract.pad(processed_features, padding="longest", return_tensors="np")
        input_4 = input_4[input_name]

        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(ValueError):
            feat_extract.pad(processed_features, padding="max_length")[input_name]

        input_5 = feat_extract.pad(
            processed_features, padding="max_length", max_length=pad_max_length, return_tensors="np"
        )
        input_5 = input_5[input_name]

        self.assertFalse(_inputs_have_equal_length(input_1))
        self.assertTrue(_inputs_have_equal_length(input_4))
        self.assertTrue(_inputs_have_equal_length(input_5))
        self.assertTrue(_inputs_are_equal(input_4, input_5))
        self.assertTrue(len(input_1[0]) == pad_min_length)
        self.assertTrue(len(input_1[1]) == pad_min_length + pad_diff)
        self.assertTrue(input_4.shape[:2] == (batch_size, len(input_4[0])))
        self.assertTrue(input_5.shape[:2] == (batch_size, pad_max_length))

        if feature_size > 1:
            self.assertTrue(input_4.shape[2] == input_5.shape[2] == feature_size)

        # test padding for `pad_to_multiple_of` for List[int] + numpy
        input_6 = feat_extract.pad(processed_features, pad_to_multiple_of=10)
        input_6 = input_6[input_name]

        input_7 = feat_extract.pad(processed_features, padding="longest", pad_to_multiple_of=10)
        input_7 = input_7[input_name]

        input_8 = feat_extract.pad(
            processed_features, padding="max_length", pad_to_multiple_of=10, max_length=pad_max_length
        )
        input_8 = input_8[input_name]

        input_9 = feat_extract.pad(
            processed_features,
            padding="max_length",
            pad_to_multiple_of=10,
            max_length=pad_max_length,
            return_tensors="np",
        )
        input_9 = input_9[input_name]

        self.assertTrue(all(len(x) % 10 == 0 for x in input_6))
        self.assertTrue(_inputs_are_equal(input_6, input_7))

        expected_mult_pad_length = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(x) == expected_mult_pad_length for x in input_8))
        self.assertEqual(input_9.shape[:2], (batch_size, expected_mult_pad_length))

        if feature_size > 1:
            self.assertTrue(input_9.shape[2] == feature_size)

        # Check padding value is correct
        padding_vector_sum = (np.ones(self.feat_extract_tester.feature_size) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_2[0])[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length))
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[1])[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(
                np.asarray(input_2[2])[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff)
            )
            < 1e-3
        )
        self.assertTrue(
            abs(input_5[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length)) < 1e-3
        )
        self.assertTrue(
            abs(input_9[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length))
            < 1e-3
        )
def _check_truncation(self, numpify=False):
    def _inputs_have_equal_length(input):
        length = len(input[0])
        for input_slice in input[1:]:
            if len(input_slice) != length:
                return False
        return True

    def _inputs_are_equal(input_1, input_2):
        if len(input_1) != len(input_2):
            return False
        for input_slice_1, input_slice_2 in zip(input_1, input_2):
            if not np.allclose(np.asarray(input_slice_1), np.asarray(input_slice_2), atol=1e-3):
                return False
        return True

    feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common(numpify=numpify)
    input_name = feat_extract.model_input_names[0]

    processed_features = BatchFeature({input_name: speech_inputs})

    # truncate to smallest
    input_1 = feat_extract.pad(
        processed_features, padding="max_length", max_length=len(speech_inputs[0]), truncation=True
    )
    input_1 = input_1[input_name]

    input_2 = feat_extract.pad(processed_features, padding="max_length", max_length=len(speech_inputs[0]))
    input_2 = input_2[input_name]

    self.assertTrue(_inputs_have_equal_length(input_1))
    self.assertFalse(_inputs_have_equal_length(input_2))

    # truncate to smallest with np
    input_3 = feat_extract.pad(
        processed_features,
        padding="max_length",
        max_length=len(speech_inputs[0]),
        return_tensors="np",
        truncation=True,
    )
    input_3 = input_3[input_name]

    input_4 = feat_extract.pad(
        processed_features, padding="max_length", max_length=len(speech_inputs[0]), return_tensors="np"
    )
    input_4 = input_4[input_name]

    self.assertTrue(_inputs_have_equal_length(input_3))
    self.assertTrue(input_3.shape[1] == len(speech_inputs[0]))

    # since truncation forces padding to be smaller than longest input
    # function can't return `np.ndarray`, but has to return list
    self.assertFalse(_inputs_have_equal_length(input_4))

    # truncate to middle
    input_5 = feat_extract.pad(
        processed_features,
        padding="max_length",
        max_length=len(speech_inputs[1]),
        truncation=True,
        return_tensors="np",
    )
    input_5 = input_5[input_name]

    input_6 = feat_extract.pad(
        processed_features, padding="max_length", max_length=len(speech_inputs[1]), truncation=True
    )
    input_6 = input_6[input_name]

    input_7 = feat_extract.pad(
        processed_features, padding="max_length", max_length=len(speech_inputs[1]), return_tensors="np"
    )
    input_7 = input_7[input_name]

    self.assertTrue(input_5.shape[1] == len(speech_inputs[1]))
    self.assertTrue(_inputs_have_equal_length(input_5))
    self.assertTrue(_inputs_have_equal_length(input_6))
    self.assertTrue(_inputs_are_equal(input_5, input_6))

    # since truncation forces padding to be smaller than longest input
    # function can't return `np.ndarray`, but has to return list
    self.assertFalse(_inputs_have_equal_length(input_7))
    self.assertTrue(len(input_7[-1]) == len(speech_inputs[-1]))

    # padding has to be max_length when setting `truncation=True`
    with self.assertRaises(ValueError):
        feat_extract.pad(processed_features, truncation=True)[input_name]

    # padding has to be max_length when setting `truncation=True`
    with self.assertRaises(ValueError):
        feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

    # padding has to be max_length when setting `truncation=True`
    with self.assertRaises(ValueError):
        feat_extract.pad(processed_features, padding="longest", truncation=True)[input_name]

    # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
    with self.assertRaises(ValueError):
        feat_extract.pad(processed_features, padding="max_length", truncation=True)[input_name]

    # test truncation for `pad_to_multiple_of` for List[int] + numpy
    pad_to_multiple_of = 12
    input_8 = feat_extract.pad(
        processed_features,
        padding="max_length",
        max_length=len(speech_inputs[0]),
        pad_to_multiple_of=pad_to_multiple_of,
        truncation=True,
    )
    input_8 = input_8[input_name]

    input_9 = feat_extract.pad(
        processed_features,
        padding="max_length",
        max_length=len(speech_inputs[0]),
        pad_to_multiple_of=pad_to_multiple_of,
    )
    input_9 = input_9[input_name]

    # retrieve expected_length as multiple of pad_to_multiple_of
    expected_length = len(speech_inputs[0])
    if expected_length % pad_to_multiple_of != 0:
        expected_length = ((len(speech_inputs[0]) // pad_to_multiple_of) + 1) * pad_to_multiple_of

    self.assertTrue(len(input_8[0]) == expected_length)
    self.assertTrue(_inputs_have_equal_length(input_8))
    self.assertFalse(_inputs_have_equal_length(input_9))

def test_padding_from_list(self):
    self._check_padding(numpify=False)

def test_padding_from_array(self):
    self._check_padding(numpify=True)

def test_truncation_from_list(self):
    self._check_truncation(numpify=False)

def test_truncation_from_array(self):
    self._check_truncation(numpify=True)

@require_torch
def test_padding_accepts_tensors_pt(self):
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_name = feat_extract.model_input_names[0]

    processed_features = BatchFeature({input_name: speech_inputs})

    input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
    input_pt = feat_extract.pad(processed_features, padding="longest", return_tensors="pt")[input_name]

    self.assertTrue(abs(input_np.astype(np.float32).sum() - input_pt.numpy().astype(np.float32).sum()) < 1e-2)

@require_tf
def test_padding_accepts_tensors_tf(self):
    feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_name = feat_extract.model_input_names[0]

    processed_features = BatchFeature({input_name: speech_inputs})

    input_np = feat_extract.pad(processed_features, padding="longest", return_tensors="np")[input_name]
    input_tf = feat_extract.pad(processed_features, padding="longest", return_tensors="tf")[input_name]

    self.assertTrue(abs(input_np.astype(np.float32).sum() - input_tf.numpy().astype(np.float32).sum()) < 1e-2)

def test_attention_mask(self):
    feat_dict = self.feat_extract_dict
    feat_dict["return_attention_mask"] = True
    feat_extract = self.feature_extraction_class(**feat_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_lengths = [len(x) for x in speech_inputs]
    input_name = feat_extract.model_input_names[0]

    processed = BatchFeature({input_name: speech_inputs})

    processed = feat_extract.pad(processed, padding="longest", return_tensors="np")
    self.assertIn("attention_mask", processed)
    self.assertListEqual(list(processed.attention_mask.shape), list(processed[input_name].shape[:2]))
    self.assertListEqual(processed.attention_mask.sum(-1).tolist(), input_lengths)

def test_attention_mask_with_truncation(self):
    feat_dict = self.feat_extract_dict
    feat_dict["return_attention_mask"] = True
    feat_extract = self.feature_extraction_class(**feat_dict)
    speech_inputs = self.feat_extract_tester.prepare_inputs_for_common()
    input_lengths = [len(x) for x in speech_inputs]
    input_name = feat_extract.model_input_names[0]

    processed = BatchFeature({input_name: speech_inputs})
    max_length = min(input_lengths)

    processed_pad = feat_extract.pad(
        processed, padding="max_length", max_length=max_length, truncation=True, return_tensors="np"
    )
    self.assertIn("attention_mask", processed_pad)
    self.assertListEqual(
        list(processed_pad.attention_mask.shape), [processed_pad[input_name].shape[0], max_length]
    )
    self.assertListEqual(
        processed_pad.attention_mask[:, :max_length].sum(-1).tolist(), [max_length for x in speech_inputs]
    )
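

# A minimal, self-contained sketch of the `pad_to_multiple_of` arithmetic exercised above
# (`_pad_to_multiple_of_sketch` is a hypothetical helper, not part of the mixin): right-pad
# a 1-D sequence with `padding_value` until its length reaches the next multiple.
def _pad_to_multiple_of_sketch(seq, multiple, padding_value=0.0):
    target = len(seq) if len(seq) % multiple == 0 else (len(seq) // multiple + 1) * multiple
    return np.pad(np.asarray(seq, dtype=np.float32), (0, target - len(seq)), constant_values=padding_value)


assert len(_pad_to_multiple_of_sketch([0.1] * 23, 10)) == 30  # 23 -> next multiple of 10 is 30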
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")

            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint


# We will verify our results on a COCO image
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")

        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add model",
            use_temp_dir=True,
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
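
# Example invocation (paths are illustrative placeholders):
#   python convert_efficientformer_original_pytorch_checkpoint_to_pytorch.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300
# Two renames rename_key is expected to perform, assuming the original repo's naming scheme:
#   "patch_embed.0.weight" -> "efficientformer.patch_embed.convolution1.weight"
#   "dist_head.weight"     -> "distillation_classifier.weight"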
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
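

# A minimal usage sketch (hypothetical model class, not part of this file): decorate any
# method that should trigger accelerate's offload hook before running.
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           return x * 2
#
# With accelerate >= 0.17.0 and an attached `_hf_hook`, `pre_forward` runs first (e.g. to
# move offloaded weights back onto the execution device); otherwise the method is unchanged.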
import os
from datetime import datetime as dt

from github import Github
LABELS_TO_EXEMPT = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
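
# Sketch of the closing predicate above, assuming the same thresholds: an issue is closed
# only when the bot commented last, it has been quiet for more than 7 days, it is at least
# 30 days old, and none of its labels appear in LABELS_TO_EXEMPT:
#
#   quiet = (dt.utcnow() - issue.updated_at).days > 7
#   old = (dt.utcnow() - issue.created_at).days >= 30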
import io
import math
from typing import Dict, Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
get_image_size,
infer_channel_dimension_format,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_vision_available, logging
from ...utils.import_utils import requires_backends
if is_vision_available():
import textwrap
from PIL import Image, ImageDraw, ImageFont
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
    is_torch_greater_or_equal_than_1_11 = False


logger = logging.get_logger(__name__)

DEFAULT_FONT_PATH = "ybelkada/fonts"
def _check_torch_version():
    if is_torch_available() and not is_torch_greater_or_equal_than_1_11:
        raise ImportError(
            f"You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use "
            "Pix2StructImageProcessor. Please upgrade torch."
        )
def torch_extract_patches(image_tensor, patch_height, patch_width):
    requires_backends(torch_extract_patches, ["torch"])
    _check_torch_version()

    image_tensor = image_tensor.unsqueeze(0)
    patches = torch.nn.functional.unfold(image_tensor, (patch_height, patch_width), stride=(patch_height, patch_width))
    patches = patches.reshape(image_tensor.size(0), image_tensor.size(1), patch_height, patch_width, -1)
    patches = patches.permute(0, 4, 2, 3, 1).reshape(
        image_tensor.size(2) // patch_height,
        image_tensor.size(3) // patch_width,
        image_tensor.size(1) * patch_height * patch_width,
    )
    return patches.unsqueeze(0)
def render_text(
    text,
    text_size=36,
    text_color="black",
    background_color="white",
    left_padding=5,
    right_padding=5,
    top_padding=5,
    bottom_padding=5,
    font_bytes=None,
    font_path=None,
):
    requires_backends(render_text, "vision")
    # Add new lines so that each line is no more than 80 characters.
    wrapper = textwrap.TextWrapper(width=80)
    lines = wrapper.wrap(text=text)
    wrapped_text = "\n".join(lines)

    if font_bytes is not None and font_path is None:
        font = io.BytesIO(font_bytes)
    elif font_path is not None:
        font = font_path
    else:
        font = hf_hub_download(DEFAULT_FONT_PATH, "Arial.TTF")
    font = ImageFont.truetype(font, encoding="UTF-8", size=text_size)

    # Use a temporary canvas to determine the width and height in pixels when
    # rendering the text.
    temp_draw = ImageDraw.Draw(Image.new("RGB", (1, 1), background_color))
    _, _, text_width, text_height = temp_draw.textbbox((0, 0), wrapped_text, font)

    # Create the actual image with a bit of padding around the text.
    image_width = text_width + left_padding + right_padding
    image_height = text_height + top_padding + bottom_padding
    image = Image.new("RGB", (image_width, image_height), background_color)
    draw = ImageDraw.Draw(image)
    draw.text(xy=(left_padding, top_padding), text=wrapped_text, fill=text_color, font=font)
    return image
def render_header(image, header, **kwargs):
    requires_backends(render_header, "vision")

    # Convert to PIL image if necessary
    image = to_pil_image(image)

    header_image = render_text(header, **kwargs)
    new_width = max(header_image.width, image.width)

    new_height = int(image.height * (new_width / image.width))
    new_header_height = int(header_image.height * (new_width / header_image.width))

    new_image = Image.new("RGB", (new_width, new_height + new_header_height), "white")
    new_image.paste(header_image.resize((new_width, new_header_height)), (0, 0))
    new_image.paste(image.resize((new_width, new_height)), (0, new_header_height))

    # Convert back to the original framework if necessary
    new_image = to_numpy_array(new_image)
    if infer_channel_dimension_format(new_image) == ChannelDimension.LAST:
        new_image = to_channel_dimension_format(new_image, ChannelDimension.LAST)
    return new_image
class Pix2StructImageProcessor(BaseImageProcessor):
    model_input_names = ["flattened_patches"]

    def __init__(
        self,
        do_convert_rgb: bool = True,
        do_normalize: bool = True,
        patch_size: Dict[str, int] = None,
        max_patches: int = 2048,
        is_vqa: bool = False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.patch_size = patch_size if patch_size is not None else {"height": 16, "width": 16}
        self.do_normalize = do_normalize
        self.do_convert_rgb = do_convert_rgb
        self.max_patches = max_patches
        self.is_vqa = is_vqa
    def extract_flattened_patches(self, image: np.ndarray, max_patches: int, patch_size: dict, **kwargs) -> np.ndarray:
        requires_backends(self.extract_flattened_patches, "torch")
        _check_torch_version()

        # convert to torch
        image = to_channel_dimension_format(image, ChannelDimension.FIRST)
        image = torch.from_numpy(image)

        patch_height, patch_width = patch_size["height"], patch_size["width"]
        image_height, image_width = get_image_size(image)

        # maximize scale s.t.
        scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
        num_feasible_rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
        num_feasible_cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
        resized_height = max(num_feasible_rows * patch_height, 1)
        resized_width = max(num_feasible_cols * patch_width, 1)

        image = torch.nn.functional.interpolate(
            image.unsqueeze(0),
            size=(resized_height, resized_width),
            mode="bilinear",
            align_corners=False,
            antialias=True,
        ).squeeze(0)

        # [1, rows, columns, patch_height * patch_width * image_channels]
        patches = torch_extract_patches(image, patch_height, patch_width)

        patches_shape = patches.shape
        rows = patches_shape[1]
        columns = patches_shape[2]
        depth = patches_shape[3]

        # [rows * columns, patch_height * patch_width * image_channels]
        patches = patches.reshape([rows * columns, depth])

        # [rows * columns, 1]
        row_ids = torch.arange(rows).reshape([rows, 1]).repeat(1, columns).reshape([rows * columns, 1])
        col_ids = torch.arange(columns).reshape([1, columns]).repeat(rows, 1).reshape([rows * columns, 1])

        # Offset by 1 so the ids do not contain zeros, which represent padding.
        row_ids += 1
        col_ids += 1

        # Prepare additional patch features.
        # [rows * columns, 1]
        row_ids = row_ids.to(torch.float32)
        col_ids = col_ids.to(torch.float32)

        # [rows * columns, 2 + patch_height * patch_width * image_channels]
        result = torch.cat([row_ids, col_ids, patches], -1)

        # [max_patches, 2 + patch_height * patch_width * image_channels]
        result = torch.nn.functional.pad(result, [0, 0, 0, max_patches - (rows * columns)]).float()

        result = to_numpy_array(result)
        return result
    def normalize(self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs) -> np.ndarray:
        if image.dtype == np.uint8:
            image = image.astype(np.float32)

        # take mean across the whole `image`
        mean = np.mean(image)
        std = np.std(image)
        adjusted_stddev = max(std, 1.0 / math.sqrt(np.prod(image.shape)))

        return normalize(image, mean=mean, std=adjusted_stddev, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        header_text: Optional[str] = None,
        do_convert_rgb: bool = None,
        do_normalize: Optional[bool] = None,
        max_patches: Optional[int] = None,
        patch_size: Optional[Dict[str, int]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> ImageInput:
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        patch_size = patch_size if patch_size is not None else self.patch_size
        max_patches = max_patches if max_patches is not None else self.max_patches
        is_vqa = self.is_vqa

        if kwargs.get("data_format", None) is not None:
            raise ValueError("data_format is not an accepted input as the outputs are ")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if is_vqa:
            if header_text is None:
                raise ValueError("A header text must be provided for VQA models.")

            font_bytes = kwargs.pop("font_bytes", None)
            font_path = kwargs.pop("font_path", None)

            if isinstance(header_text, str):
                header_text = [header_text] * len(images)

            images = [
                render_header(image, header_text[i], font_bytes=font_bytes, font_path=font_path)
                for i, image in enumerate(images)
            ]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        # convert to torch tensor and permute
        images = [
            self.extract_flattened_patches(image=image, max_patches=max_patches, patch_size=patch_size)
            for image in images
        ]

        # create attention mask in numpy
        attention_masks = [(image.sum(axis=-1) != 0).astype(np.float32) for image in images]

        encoded_outputs = BatchFeature(
            data={"flattened_patches": images, "attention_mask": attention_masks}, tensor_type=return_tensors
        )

        return encoded_outputs
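

# A minimal usage sketch (shapes follow from the defaults above; requires torch at runtime):
#
#   import numpy as np
#   processor = Pix2StructImageProcessor(max_patches=512)
#   dummy = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
#   encoding = processor.preprocess(dummy, return_tensors="np")
#   # encoding["flattened_patches"]: (1, 512, 2 + 16 * 16 * 3)
#   # encoding["attention_mask"]:    (1, 512)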
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n spiral."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total
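

# Quick sanity check (Project Euler 28): for a 5x5 spiral the diagonals are
# 1 + 3 + 5 + 7 + 9 + 13 + 17 + 21 + 25 = 101, which the loop above reproduces.
assert solution(5) == 101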
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN:
    def __init__(self, conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2):
        """
        :param conv1_get: [size, number, step] of the convolution kernels
        :param size_p1: pooling size
        :param bp_num1: unit number of the flattened layer
        :param bp_num2: unit number of the hidden layer
        :param bp_num3: unit number of the output layer
        :param rate_w: learning rate of the weights
        :param rate_t: learning rate of the thresholds
        """
        self.num_bp1 = bp_num1
        self.num_bp2 = bp_num2
        self.num_bp3 = bp_num3
        self.conv1 = conv1_get[:2]
        self.step_conv1 = conv1_get[2]
        self.size_pooling1 = size_p1
        self.rate_weight = rate_w
        self.rate_thre = rate_t
        self.w_conv1 = [
            np.mat(-1 * np.random.rand(self.conv1[0], self.conv1[0]) + 0.5) for i in range(self.conv1[1])
        ]
        self.wkj = np.mat(-1 * np.random.rand(self.num_bp3, self.num_bp2) + 0.5)
        self.vji = np.mat(-1 * np.random.rand(self.num_bp2, self.num_bp1) + 0.5)
        self.thre_conv1 = -2 * np.random.rand(self.conv1[1]) + 1
        self.thre_bp2 = -2 * np.random.rand(self.num_bp2) + 1
        self.thre_bp3 = -2 * np.random.rand(self.num_bp3) + 1
    def save_model(self, save_path):
        # save model dict with pickle
        model_dic = {
            "num_bp1": self.num_bp1,
            "num_bp2": self.num_bp2,
            "num_bp3": self.num_bp3,
            "conv1": self.conv1,
            "step_conv1": self.step_conv1,
            "size_pooling1": self.size_pooling1,
            "rate_weight": self.rate_weight,
            "rate_thre": self.rate_thre,
            "w_conv1": self.w_conv1,
            "wkj": self.wkj,
            "vji": self.vji,
            "thre_conv1": self.thre_conv1,
            "thre_bp2": self.thre_bp2,
            "thre_bp3": self.thre_bp3,
        }
        with open(save_path, "wb") as f:
            pickle.dump(model_dic, f)

        print(f"Model saved: {save_path}")
    @classmethod
    def read_model(cls, model_path):
        # read a saved model
        with open(model_path, "rb") as f:
            model_dic = pickle.load(f)  # noqa: S301

        conv_get = model_dic.get("conv1")
        conv_get.append(model_dic.get("step_conv1"))
        size_p1 = model_dic.get("size_pooling1")
        bp1 = model_dic.get("num_bp1")
        bp2 = model_dic.get("num_bp2")
        bp3 = model_dic.get("num_bp3")
        rate_w = model_dic.get("rate_weight")
        rate_t = model_dic.get("rate_thre")
        # create model instance
        conv_ins = CNN(conv_get, size_p1, bp1, bp2, bp3, rate_w, rate_t)
        # modify model parameter
        conv_ins.w_conv1 = model_dic.get("w_conv1")
        conv_ins.wkj = model_dic.get("wkj")
        conv_ins.vji = model_dic.get("vji")
        conv_ins.thre_conv1 = model_dic.get("thre_conv1")
        conv_ins.thre_bp2 = model_dic.get("thre_bp2")
        conv_ins.thre_bp3 = model_dic.get("thre_bp3")
        return conv_ins
    def sig(self, x):
        return 1 / (1 + np.exp(-1 * x))

    def do_round(self, x):
        return round(x, 3)
    def convolute(self, data, convs, w_convs, thre_convs, conv_step):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data)[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0, size_data - size_conv + 1, conv_step):
            for j_focus in range(0, size_data - size_conv + 1, conv_step):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus)
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1)
        for i_map in range(num_conv):
            featuremap = []
            for i_focus in range(len(data_focus)):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus], w_convs[i_map]))
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus))
            featuremap = np.asmatrix(featuremap).reshape(
                size_feature_map, size_feature_map
            )
            data_featuremap.append(featuremap)

        # expanding the data slice to one dimension
        focus1_list = []
        for each_focus in data_focus:
            focus1_list.extend(self._expand_mat(each_focus))
        focus_list = np.asarray(focus1_list)
        return focus_list, data_featuremap
    def pooling(self, featuremaps, size_pooling, pooling_type="average_pool"):
        # pooling process
        size_map = len(featuremaps[0])
        size_pooled = int(size_map / size_pooling)
        featuremap_pooled = []
        for i_map in range(len(featuremaps)):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0, size_map, size_pooling):
                for j_focus in range(0, size_map, size_pooling):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus))
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus))
            map_pooled = np.asmatrix(map_pooled).reshape(size_pooled, size_pooled)
            featuremap_pooled.append(map_pooled)
        return featuremap_pooled
    def _expand(self, data):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data)):
            shapes = np.shape(data[i])
            data_listed = data[i].reshape(1, shapes[0] * shapes[1])
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed)
        data_expanded = np.asarray(data_expanded)
        return data_expanded

    def _expand_mat(self, data_mat):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data_mat)
        shapes = np.shape(data_mat)
        data_expanded = data_mat.reshape(1, shapes[0] * shapes[1])
        return data_expanded
    def _calculate_gradient_from_pool(self, out_map, pd_pool, num_map, size_map, size_pooling):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map):
            pd_conv1 = np.ones((size_map, size_map))
            for i in range(0, size_map, size_pooling):
                for j in range(0, size_map, size_pooling):
                    pd_conv1[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conv2 = np.multiply(
                pd_conv1, np.multiply(out_map[i_map], (1 - out_map[i_map]))
            )
            pd_all.append(pd_conv2)
        return pd_all
    def train(self, patterns, datas_train, datas_teach, n_repeat, error_accuracy, draw_e=False):
        # model training
        print("----------------------Start Training-------------------------")
        print((" - - Shape: Train_Data  ", np.shape(datas_train)))
        print((" - - Shape: Teach_Data  ", np.shape(datas_teach)))
        rp = 0
        all_mse = []
        mse = 10000
        while rp < n_repeat and mse >= error_accuracy:
            error_count = 0
            print(f"-------------Learning Time {rp}--------------")
            for p in range(len(datas_train)):
                # print('------------Learning Image: %d--------------'%p)
                data_train = np.asmatrix(datas_train[p])
                data_teach = np.asarray(datas_teach[p])
                data_focus1, data_conved1 = self.convolute(
                    data_train, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
                )
                data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
                shape_featuremap1 = np.shape(data_conved1)
                data_bp_input = self._expand(data_pooled1)
                bp_out1 = data_bp_input

                bp_net_j = np.dot(bp_out1, self.vji.T) - self.thre_bp2
                bp_out2 = self.sig(bp_net_j)
                bp_net_k = np.dot(bp_out2, self.wkj.T) - self.thre_bp3
                bp_out3 = self.sig(bp_net_k)

                # --------------Model Learning ------------------------
                # calculate error and gradient---------------
                pd_k_all = np.multiply(
                    (data_teach - bp_out3), np.multiply(bp_out3, (1 - bp_out3))
                )
                pd_j_all = np.multiply(
                    np.dot(pd_k_all, self.wkj), np.multiply(bp_out2, (1 - bp_out2))
                )
                pd_i_all = np.dot(pd_j_all, self.vji)

                pd_conv1_pooled = pd_i_all / (self.size_pooling1 * self.size_pooling1)
                pd_conv1_pooled = pd_conv1_pooled.T.getA().tolist()
                pd_conv1_all = self._calculate_gradient_from_pool(
                    data_conved1, pd_conv1_pooled, shape_featuremap1[0], shape_featuremap1[1], self.size_pooling1
                )
                # weight and threshold learning process---------
                # convolution layer
                for k_conv in range(self.conv1[1]):
                    pd_conv_list = self._expand_mat(pd_conv1_all[k_conv])
                    delta_w = self.rate_weight * np.dot(pd_conv_list, data_focus1)
                    self.w_conv1[k_conv] = self.w_conv1[k_conv] + delta_w.reshape(
                        (self.conv1[0], self.conv1[0])
                    )
                    self.thre_conv1[k_conv] = (
                        self.thre_conv1[k_conv]
                        - np.sum(pd_conv1_all[k_conv]) * self.rate_thre
                    )
                # all connected layer
                self.wkj = self.wkj + pd_k_all.T * bp_out2 * self.rate_weight
                self.vji = self.vji + pd_j_all.T * bp_out1 * self.rate_weight
                self.thre_bp3 = self.thre_bp3 - pd_k_all * self.rate_thre
                self.thre_bp2 = self.thre_bp2 - pd_j_all * self.rate_thre
                # calculate the sum error of all single image
                errors = np.sum(abs(data_teach - bp_out3))
                error_count += errors
                # print('   ----Teach      ',data_teach)
                # print('   ----BP_output  ',bp_out3)
            rp = rp + 1
            mse = error_count / patterns
            all_mse.append(mse)

        def draw_error():
            yplot = [error_accuracy for i in range(int(n_repeat * 1.2))]
            plt.plot(all_mse, "+-")
            plt.plot(yplot, "r--")
            plt.xlabel("Learning Times")
            plt.ylabel("All_mse")
            plt.grid(True, alpha=0.5)
            plt.show()

        print("------------------Training Complete---------------------")
        print((" - - Training epoch: ", rp, f"  - - Mse: {mse:.6f}"))
        if draw_e:
            draw_error()
        return mse
    def predict(self, datas_test):
        # model predict
        produce_out = []
        print("-------------------Start Testing-------------------------")
        print((" - - Shape: Test_Data  ", np.shape(datas_test)))
        for p in range(len(datas_test)):
            data_test = np.asmatrix(datas_test[p])
            data_focus1, data_conved1 = self.convolute(
                data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
            )
            data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
            data_bp_input = self._expand(data_pooled1)

            bp_out1 = data_bp_input
            bp_net_j = bp_out1 * self.vji.T - self.thre_bp2
            bp_out2 = self.sig(bp_net_j)
            bp_net_k = bp_out2 * self.wkj.T - self.thre_bp3
            bp_out3 = self.sig(bp_net_k)
            produce_out.extend(bp_out3.getA().tolist())
        res = [list(map(self.do_round, each)) for each in produce_out]
        return np.asarray(res)

    def convolution(self, data):
        # return the data of image after convoluting process so we can check it out
        data_test = np.asmatrix(data)
        data_focus1, data_conved1 = self.convolute(
            data_test, self.conv1, self.w_conv1, self.thre_conv1, conv_step=self.step_conv1
        )
        data_pooled1 = self.pooling(data_conved1, self.size_pooling1)
        return data_conved1, data_pooled1
if __name__ == "__main__":
pass
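
# A toy usage sketch (all sizes are illustrative): two 3x3 kernels with stride 1 and 2x2
# average pooling on a 6x6 input yield 2 * 2 * 2 = 8 flattened units, hence bp_num1=8.
#
#   cnn = CNN(conv1_get=[3, 2, 1], size_p1=2, bp_num1=8, bp_num2=4, bp_num3=2)
#   conved, pooled = cnn.convolution(np.random.rand(6, 6))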
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/config.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/config.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"
),
"distilbert-base-uncased-finetuned-sst-2-english": (
"https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
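

# A minimal usage sketch: the ONNX config reports which axes are dynamic for export.
#
#   config = DistilBertConfig()
#   onnx_config = DistilBertOnnxConfig(config)
#   # onnx_config.inputs -> OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#   #                                    ("attention_mask", {0: "batch", 1: "sequence"})])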
import os


def solution():
    """Returns the total of all the name scores in the file."""
    with open(os.path.dirname(__file__) + "/p022_names.txt") as file:
        names = str(file.readlines()[0])
        names = names.replace('"', "").split(",")

    names.sort()

    name_score = 0
    total_score = 0

    for i, name in enumerate(names):
        for letter in name:
            name_score += ord(letter) - 64

        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
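
# Worked example from the problem statement: COLIN scores 3 + 15 + 12 + 9 + 14 = 53 and,
# as the 938th name in the sorted list, contributes 938 * 53 = 49714 to the total.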
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]

        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
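
    # Trace of the merge sequence implied by the vocab/merges written in setUp: "adapt" is a
    # vocab entry as-is; "react" splits to characters and only "r e" can merge, surfacing as
    # "re@@ a@@ c@@ t"; "readapt" merges "r e", "a d", "a p", "ap t</w>", "ad apt</w>" into
    # "re@@ adapt".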
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') )
# projection layer + position embeddings
rename_keys.extend(
[
('cls_token', 'vit.embeddings.cls_token'),
('patch_embed.proj.weight', 'vit.embeddings.patch_embeddings.projection.weight'),
('patch_embed.proj.bias', 'vit.embeddings.patch_embeddings.projection.bias'),
('pos_embed', 'vit.embeddings.position_embeddings'),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('norm.weight', 'vit.layernorm.weight'),
('norm.bias', 'vit.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
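
# Example invocation (output path is a placeholder; the model name comes from the DINO torch hub):
#   python <this script> --model_name dino_vitb16 --pytorch_dump_folder_path ./dino-vitb16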
import math
def prime_sieve(n: int) -> list:
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
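

# Quick sanity check of the sieve helper (cheap to run at import time):
assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]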
if __name__ == "__main__":
print(solution())
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from eli5_utils import (
    embed_questions_for_retrieval,
    make_qa_s2s_model,
    qa_s2s_generate,
    query_es_index,
    query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer
MODEL_TYPE = "bart"
LOAD_DENSE_INDEX = True
@st.cache(allow_output_mutation=True)
def load_models():
    if LOAD_DENSE_INDEX:
        qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased")
        qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0")
        _ = qar_model.eval()
    else:
        qar_tokenizer, qar_model = (None, None)
    if MODEL_TYPE == "bart":
        sas_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5")
        sas_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0")
        save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth")
        sas_model.load_state_dict(save_dict["model"])
        _ = sas_model.eval()
    else:
        sas_tokenizer, sas_model = make_qa_s2s_model(
            model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0"
        )
    return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=True)
def load_indexes():
    if LOAD_DENSE_INDEX:
        faiss_res = faiss.StandardGpuResources()
        wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"]
        wiki40b_passage_reps = np.memmap(
            "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat",
            dtype="float32",
            mode="r",
            shape=(wiki40b_passages.num_rows, 128),
        )
        wiki40b_index_flat = faiss.IndexFlatIP(128)
        wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat)
        wiki40b_gpu_index_flat.add(wiki40b_passage_reps)  # TODO fix for larger GPU
    else:
        wiki40b_passages, wiki40b_gpu_index_flat = (None, None)
    es_client = Elasticsearch([{"host": "localhost", "port": "9200"}])
    return (wiki40b_passages, wiki40b_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=True)
def load_train_data():
    eli5 = datasets.load_dataset("eli5", name="LFQA_reddit")
    eli5_train = eli5["train_eli5"]
    eli5_train_q_reps = np.memmap(
        "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128)
    )
    eli5_train_q_index = faiss.IndexFlatIP(128)
    eli5_train_q_index.add(eli5_train_q_reps)
    return (eli5_train, eli5_train_q_index)
wiki40b_passages, wiki40b_gpu_index_flat, es_client = load_indexes()
qar_tokenizer, qar_model, sas_tokenizer, sas_model = load_models()
eli5_train, eli5_train_q_index = load_train_data()
def find_nearest_training(question, n_results=10):
    q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model)
    D, I = eli5_train_q_index.search(q_rep, n_results)
    nn_examples = [eli5_train[int(i)] for i in I[0]]
    return nn_examples
def make_support(question, source="wiki40b", method="dense", n_results=10):
    if source == "none":
        support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), [])
    else:
        if method == "dense":
            support_doc, hit_lst = query_qa_dense_index(
                question, qar_model, qar_tokenizer, wiki40b_passages, wiki40b_gpu_index_flat, n_results
            )
        else:
            support_doc, hit_lst = query_es_index(
                question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results
            )
    support_list = [
        (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
    ]
    question_doc = "question: {} context: {}".format(question, support_doc)
    return question_doc, support_list
@st.cache(
    hash_funcs={
        torch.Tensor: (lambda _: None),
        transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None),
    }
)
def answer_question(
    question_doc, sas_model, sas_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8
):
    with torch.no_grad():
        answer = qa_s2s_generate(
            question_doc,
            sas_model,
            sas_tokenizer,
            num_answers=1,
            num_beams=n_beams,
            min_len=min_len,
            max_len=max_len,
            do_sample=sampling,
            temp=temp,
            top_p=top_p,
            top_k=None,
            max_input_length=1024,
            device="cuda:0",
        )[0]
    return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
header_full = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
    header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
description = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
action_list = [
    "Answer the question",
    "View the retrieved document only",
    "View the most similar ELI5 question and answer",
    "Show me everything, please!",
]
demo_options = st.sidebar.checkbox("Demo options")
if demo_options:
    action_st = st.sidebar.selectbox(
        "",
        action_list,
        index=3,
    )
    action = action_list.index(action_st)
    show_type = st.sidebar.selectbox(
        "",
        ["Show full text of passages", "Show passage section titles"],
        index=0,
    )
    show_passages = show_type == "Show full text of passages"
else:
    action = 3
    show_passages = True
lowerCamelCase__ = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
lowerCamelCase__ = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
lowerCamelCase__ = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
lowerCamelCase__ = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
lowerCamelCase__ = "wiki40b"
lowerCamelCase__ = "dense"
lowerCamelCase__ = "beam"
lowerCamelCase__ = 2
lowerCamelCase__ = 64
lowerCamelCase__ = 256
lowerCamelCase__ = None
lowerCamelCase__ = None
lowerCamelCase__ = st.sidebar.checkbox("Generation options")
if generate_options:
lowerCamelCase__ = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
lowerCamelCase__ = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
lowerCamelCase__ = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
lowerCamelCase__ = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
lowerCamelCase__ = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
lowerCamelCase__ = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.9_5, step=0.0_1, format=None, key=None
)
lowerCamelCase__ = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.0_1, format=None, key=None
)
lowerCamelCase__ = None
# start main text
questions_list = [
    "<MY QUESTION>",
    "How do people make chocolate?",
    "Why do we get a fever when we are sick?",
    "How can different animals perceive different colors?",
    "What is natural language processing?",
    "What's the best way to treat a sunburn?",
    "What exactly are vitamins ?",
    "How does nuclear energy provide electricity?",
    "What's the difference between viruses and bacteria?",
    "Why are flutes classified as woodwinds when most of them are made out of metal ?",
    "Why do people like drinking coffee even though it tastes so bad?",
    "What happens when wine ages? How does it make the wine taste better?",
    "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
    "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
    "How does New Zealand have so many large bird predators?",
]
question_s = st.selectbox(
    "What would you like to ask? ---- select <MY QUESTION> to enter a new query",
    questions_list,
    index=1,
)
if question_s == "<MY QUESTION>":
    question = st.text_input("Enter your question here:", "")
else:
    question = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
lowerCamelCase__ , lowerCamelCase__ = make_support(question, source=wiki_source, method="dense", n_results=10)
lowerCamelCase__ , lowerCamelCase__ = make_support(question, source=wiki_source, method="sparse", n_results=10)
lowerCamelCase__ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
lowerCamelCase__ = support_list[:10]
lowerCamelCase__ = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
lowerCamelCase__ , lowerCamelCase__ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
lowerCamelCase__ , lowerCamelCase__ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
lowerCamelCase__ = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
lowerCamelCase__ = res[1].strip()
if sec_titles == "":
lowerCamelCase__ = "[{}]({})".format(res[0], wiki_url)
else:
lowerCamelCase__ = sec_titles.split(" & ")
lowerCamelCase__ = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
lowerCamelCase__ = find_nearest_training(question)
lowerCamelCase__ = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
lowerCamelCase__ = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
lowerCamelCase__ = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
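# To try the demo locally (assuming the precomputed indexes, models, and the
# helper functions referenced above are available), save this script as, say,
# eli5_app.py and launch it with:
#
#   streamlit run eli5_app.py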
| 574
|
"""simple docstring"""
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def _SCREAMING_SNAKE_CASE ( UpperCamelCase : str = "laptop" ):
A__ = F"""https://www.amazon.in/laptop/s?k={product}"""
A__ = {
"""User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
(KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
"""Accept-Language""": """en-US, en;q=0.5""",
}
A__ = BeautifulSoup(requests.get(UpperCamelCase , headers=UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            """Product Title""",
            """Product Link""",
            """Current Price of the product""",
            """Product Rating""",
            """MRP of the product""",
            """Discount""",
        ] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ):
        try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text
            try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                            - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) )
                        )
                        / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) )
                    )
                    * 100 )
            except ValueError:
                discount = float("""nan""" )
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index )] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["""Current Price of the product"""] > data_frame["""MRP of the product"""],
            """MRP of the product""",
        ] = """ """
        data_frame.loc[
            data_frame["""Current Price of the product"""] > data_frame["""MRP of the product"""],
            """Discount""",
        ] = """ """
        data_frame.index += 1
return data_frame
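# Discount sanity check with illustrative numbers: an MRP of ₹1,000 and a
# current price of ₹750 give (1000 - 750) / 1000 * 100 = 25.0 percent off.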
if __name__ == "__main__":
lowerCamelCase__ = "headphones"
get_amazon_product_data(product).to_csv(F'Amazon Product Data for {product}.csv')
| 574
| 1
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            choice_labels = ids_tensor([self.batch_size] , self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=True , esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False} , )
        return config
    def create_and_check_model(self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    test_mismatched_shapes = False
    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip("Does not support attention outputs")
def _lowercase ( self : List[str]):
pass
@unittest.skip
def _lowercase ( self : List[Any]):
pass
@unittest.skip("Esm does not support embedding resizing")
def _lowercase ( self : List[Any]):
pass
@unittest.skip("Esm does not support embedding resizing")
def _lowercase ( self : Optional[int]):
pass
@unittest.skip("ESMFold does not support passing input embeds!")
def _lowercase ( self : List[str]):
pass
@unittest.skip("ESMFold does not support head pruning.")
def _lowercase ( self : Optional[int]):
pass
@unittest.skip("ESMFold does not support head pruning.")
def _lowercase ( self : Optional[Any]):
pass
@unittest.skip("ESMFold does not support head pruning.")
def _lowercase ( self : List[str]):
pass
@unittest.skip("ESMFold does not support head pruning.")
def _lowercase ( self : Union[str, Any]):
pass
@unittest.skip("ESMFold does not support head pruning.")
def _lowercase ( self : List[Any]):
pass
@unittest.skip("ESMFold does not output hidden states in the normal way.")
def _lowercase ( self : str):
pass
@unittest.skip("ESMfold does not output hidden states in the normal way.")
def _lowercase ( self : List[str]):
pass
@unittest.skip("ESMFold only has one output format.")
def _lowercase ( self : str):
pass
@unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality")
def _lowercase ( self : Optional[int]):
pass
@unittest.skip("ESMFold does not support input chunking.")
def _lowercase ( self : List[str]):
pass
@unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.")
def _lowercase ( self : Union[str, Any]):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def _lowercase ( self : str):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def _lowercase ( self : str):
pass
@unittest.skip("ESMFold doesn't support torchscript compilation.")
def _lowercase ( self : Dict):
pass
@unittest.skip("ESMFold doesn't support data parallel.")
def _lowercase ( self : Tuple):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def _lowercase ( self : Tuple):
pass
@require_torch
class EsmModelIntegrationTest( TestCasePlus ):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)["positions"]
        expected_slice = torch.tensor([2.58_28, 0.79_93, -10.93_34] , dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , expected_slice , atol=1e-4))
| 182
|
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq(number: int ) -> bool:
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
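# For example, is_sq(36) is True (6 * 6), while is_sq(35) is False.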
def add_three(x_num: int , x_den: int , y_num: int , y_den: int , z_num: int , z_den: int ) -> tuple[int, int]:
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
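# Worked example: add_three(1, 2, 1, 3, 1, 6) sums 1/2 + 1/3 + 1/6 = 36/36 and
# returns the reduced fraction (1, 1).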
def solution(order: int = 3_5 ) -> int:
    """simple docstring"""
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 182
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester ( unittest.TestCase ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest ( ImageProcessingSavingTestMixin , unittest.TestCase ):
'''simple docstring'''
    image_processing_class = ViTImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )
@property
    def image_processor_dict( self ):
return self.image_proc_tester.prepare_image_processor_dict()
    def test_image_proc_properties( self ):
        image_processor = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processor , "image_mean" ) )
        self.assertTrue(hasattr(image_processor , "image_std" ) )
        self.assertTrue(hasattr(image_processor , "do_normalize" ) )
        self.assertTrue(hasattr(image_processor , "do_resize" ) )
        self.assertTrue(hasattr(image_processor , "size" ) )
def __UpperCAmelCase( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def test_call_numpy( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )

    def test_call_pytorch( self ):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processor(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processor(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ) , )
| 520
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings('ignore', category=UserWarning, module='torch.optim.lr_scheduler')
class AcceleratedScheduler:
    '''simple docstring'''
    def __init__( self , scheduler , optimizers , step_with_optimizer = True , split_batches = False ):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
    def step( self , *args , **kwargs ):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args , **kwargs )
            return
        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return
        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args , **kwargs )
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes ):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler , "total_steps" ):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args , **kwargs )
                else:
                    self.scheduler.step(*args , **kwargs )
    def get_last_lr( self ):
        return self.scheduler.get_last_lr()

    def state_dict( self ):
        return self.scheduler.state_dict()

    def load_state_dict( self , state_dict ):
        self.scheduler.load_state_dict(state_dict )

    def get_lr( self ):
        return self.scheduler.get_lr()

    def print_lr( self , *args , **kwargs ):
        return self.scheduler.print_lr(*args , **kwargs )
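# A minimal usage sketch (with hypothetical optimizer/scheduler objects): under
# gradient accumulation, `step()` above only advances the learning rate on the
# iterations where the wrapped optimizer actually performed an update.
#
#   scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#   for batch in dataloader:
#       ...  # forward/backward
#       optimizer.step()
#       scheduler.step()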
| 520
| 1
|
from collections.abc import Callable
def bisection(function: Callable[[float], float] , a: float , b: float ) -> float:
    '''simple docstring'''
    start: float = a
    end: float = b
    if function(a ) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b ) == 0:
        return b
    elif (
        function(a ) * function(b ) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("""could not find root in given interval.""" )
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # until precisely equals to 10^-7
            if function(mid ) == 0:
                return mid
            elif function(mid ) * function(start ) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float ) -> float:
    '''simple docstring'''
    return x**3 - 2 * x - 5
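# Illustrative call: bisection(lambda x: x**2 - 4, 1.0, 3.0) converges to ~2.0,
# the root of x**2 - 4 inside the interval [1, 3].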
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
| 448
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
    batch_params = [
        'prompt',
        'negative_prompt',
        'image_embeds',
        'negative_image_embeds',
        'image',
    ]
    required_optional_params = [
        'generator',
        'height',
        'width',
        'strength',
        'guidance_scale',
        'negative_prompt',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
@property
    def text_embedder_hidden_size( self ):
return 3_2
@property
    def time_input_dim( self ):
return 3_2
@property
    def block_out_channels_a( self ):
return self.time_input_dim
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def cross_attention_dim( self ):
return 1_0_0
@property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
    def dummy_text_encoder( self ):
torch.manual_seed(0 )
        config = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
return text_encoder
@property
    def dummy_unet( self ):
torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler_kwargs = {
"""num_train_timesteps""": 1_0_0_0,
"""beta_schedule""": """linear""",
"""beta_start""": 0.00085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
        scheduler = DDIMScheduler(**scheduler_kwargs )
        components = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((2_5_6, 2_5_6) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 6_4,
"""width""": 6_4,
"""num_inference_steps""": 1_0,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
    def test_kandinsky_img2img( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
    expected_slice = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_img2img_frog.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        prompt = """A red cartoon frog, 4k"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb, zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (7_6_8, 7_6_8, 3)
        assert_mean_pixel_difference(image , expected_image )
| 448
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False
    def setUp( self ):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def canine_tokenizer( self ):
        return CanineTokenizer.from_pretrained("""google/canine-s""" )

    def get_tokenizer( self , **kwargs ) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
        tokenizer.model_max_length = 1024
        return tokenizer
@require_torch
def __snake_case ( self ) -> str:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = ["""Life is like a box of chocolates.""", """You never know what you\'re gonna get."""]
# fmt: off
lowerCAmelCase = [5_7344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 5_7345, 0, 0, 0, 0]
# fmt: on
lowerCAmelCase = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""pt""" )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
lowerCAmelCase = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
lowerCAmelCase = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , _lowerCamelCase )
self.assertIn("""attention_mask""" , _lowerCamelCase )
self.assertIn("""token_type_ids""" , _lowerCamelCase )
@require_torch
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = self.canine_tokenizer
lowerCAmelCase = [
"""What\'s the weater?""",
"""It\'s about 25 degrees.""",
]
lowerCAmelCase = tokenizer(
text_target=_lowerCamelCase , max_length=32 , padding="""max_length""" , truncation=_lowerCamelCase , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCAmelCase = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
shutil.rmtree(_lowerCamelCase )
lowerCAmelCase = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
lowerCAmelCase = chr(0xe_007 )
additional_special_tokens.append(_lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
tokenizer.save_pretrained(_lowerCamelCase )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_lowerCamelCase )
lowerCAmelCase = after_tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
self.assertIn(_lowerCamelCase , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase = tokenizer.__class__.from_pretrained(_lowerCamelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_lowerCamelCase )
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase, lowerCAmelCase = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0xe_005
lowerCAmelCase = chr(_lowerCamelCase )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
lowerCAmelCase = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCamelCase )
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , input_encoded + special_token_id )
lowerCAmelCase = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def __snake_case ( self ) -> str:
lowerCAmelCase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = chr(0xe_005 )
lowerCAmelCase = chr(0xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
lowerCAmelCase = tokenizer.tokenize(_lowerCamelCase )
lowerCAmelCase = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCamelCase )
self.assertEqual(token_a[0] , _lowerCamelCase )
@require_tokenizers
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0xe_006
lowerCAmelCase = chr(_lowerCamelCase )
lowerCAmelCase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def __snake_case ( self ) -> Dict:
lowerCAmelCase = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
lowerCAmelCase = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
lowerCAmelCase = 0xe_006
lowerCAmelCase = chr(_lowerCamelCase )
lowerCAmelCase = [new_token_a]
lowerCAmelCase = [new_token_a]
with open(os.path.join(_lowerCamelCase , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase = tokenizer_class.from_pretrained(_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
lowerCAmelCase = 0xe_007
lowerCAmelCase = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase = [AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )]
lowerCAmelCase = tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def __snake_case ( self ) -> Any:
lowerCAmelCase = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = """hello world"""
if self.space_between_special_tokens:
lowerCAmelCase = """[CLS] hello world [SEP]"""
else:
lowerCAmelCase = input
lowerCAmelCase = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
lowerCAmelCase = tokenizer.decode(_lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase , [output, output.lower()] )
def __snake_case ( self ) -> Optional[Any]:
lowerCAmelCase = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
lowerCAmelCase = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
lowerCAmelCase = """a"""
lowerCAmelCase = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase , attr + """_id""" , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + """_id""" ) , _lowerCamelCase )
setattr(_lowerCamelCase , attr + """_id""" , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + """_id""" ) , _lowerCamelCase )
setattr(_lowerCamelCase , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_lowerCamelCase , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_lowerCamelCase , """additional_special_tokens_ids""" ) , [] )
lowerCAmelCase = 0xe_006
lowerCAmelCase = chr(_lowerCamelCase )
setattr(_lowerCamelCase , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def __snake_case ( self ) -> Optional[int]:
pass
def __snake_case ( self ) -> int:
pass
def __snake_case ( self ) -> Dict:
pass
def __snake_case ( self ) -> Union[str, Any]:
pass
def __snake_case ( self ) -> List[Any]:
pass
def __snake_case ( self ) -> int:
pass
def __snake_case ( self ) -> List[Any]:
pass
def __snake_case ( self ) -> List[str]:
pass
| 433
|
'''simple docstring'''
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict ):
    ignore_keys = [
        '''decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path ):
    checkpoint = torch.load(checkpoint_path , map_location='''cpu''' )
    args = Namespace(**checkpoint['''cfg''']['''model'''] )
    state_dict = checkpoint['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    state_dict = {key.replace('''decoder''' , '''model''' ): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''gelu''' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
    model = XGLMForCausalLM(config )
    missing = model.load_state_dict(state_dict , strict=False )
    print(missing )
    model.lm_head = make_linear_from_emb(model.model.embed_tokens )
    return model
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''fairseq_path''', type=str, help='''path to a model.pt on local filesystem.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
_SCREAMING_SNAKE_CASE = parser.parse_args()
_SCREAMING_SNAKE_CASE = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 502
| 0
|
def is_isogram(string: str ) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string ):
        raise ValueError("String must only contain alphabetic characters." )
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
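# For example, is_isogram("Uncopyrightable") is True (no repeated letters),
# while is_isogram("allowance") is False because of the doubled letters.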
if __name__ == "__main__":
_A = input("Enter a string ").strip()
_A = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 279
|
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
_A = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class _lowerCAmelCase ( nn.Module ):
    def __init__( self , args ):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
    def forward( self , x ):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x ) )
        out = torch.flatten(out , start_dim=2 )
        out = out.transpose(1 , 2 ).contiguous()
        return out  # BxNx2048
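    # Shape walk-through with num_image_embeds = 4 (illustrative only): a batch
    # of 2 images (2, 3, 224, 224) -> ResNet features (2, 2048, 7, 7) -> pooled
    # to POOLING_BREAKDOWN[4] = (2, 2), i.e. (2, 2048, 2, 2) -> flattened to
    # (2, 2048, 4) -> transposed to (2, 4, 2048).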
class JsonlDataset( Dataset ):
    def __init__( self , data_path , tokenizer , transforms , labels , max_seq_length ):
        self.data = [json.loads(l ) for l in open(data_path )]
        self.data_dir = os.path.dirname(data_path )
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels )
        self.max_seq_length = max_seq_length
        self.transforms = transforms
def __len__( self ) -> Any:
return len(self.data )
    def __getitem__( self , index ):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=True ) )
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes )
        label[[self.labels.index(tgt ) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        image = self.transforms(image )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
    def get_label_frequencies( self ):
        label_freqs = Counter()
for row in self.data:
label_freqs.update(row["label"] )
return label_freqs
def collate_fn( batch ):
    """simple docstring"""
    lengths = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lengths )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lengths ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
"""simple docstring"""
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
"""simple docstring"""
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ),
] )
| 279
| 1
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["""\nclass""", """\ndef""", """\n#""", """\n@""", """\nprint""", """\nif"""]
class TokenizedDataset( IterableDataset ):
    def __init__( self , tokenizer , dataset , n_tasks=None , n_copies=1 ):
        """simple docstring"""
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset ) if n_tasks is None else n_tasks
        self.n_copies = n_copies
def __iter__( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : str = []
for task in range(self.n_tasks ):
# without strip, the model generate commented codes ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
lowerCamelCase_ : List[Any] = self.tokenizer(UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class EndOfFunctionCriteria( StoppingCriteria ):
    def __init__( self , start_length , eof_strings , tokenizer ):
        """simple docstring"""
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
def __call__( self : List[str] , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict , **UpperCamelCase_ : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : Any = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
lowerCamelCase_ : List[str] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(UpperCamelCase_ )
def remove_last_block(string ):
    """simple docstring"""
    string_list = re.split('''(%s)''' % '''|'''.join(EOF_STRINGS ) , string )
    # last string should be ""
    return "".join(string_list[:-2] )
def complete_code(accelerator , model , tokenizer , dataloader , n_tasks , batch_size=20 , **gen_kwargs ):
    """simple docstring"""
    gen_token_dict = defaultdict(list )  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader ) ):
        with torch.no_grad():
            gen_kwargs["""stopping_criteria"""][0].start_length = batch['''ids'''].shape[-1]
            generated_tokens = accelerator.unwrap_model(model ).generate(
                input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=batch_size , **gen_kwargs )
            # each task is generated batch_size times
            generated_tasks = batch['''task_id'''].repeat(batch_size )
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens , dim=1 , pad_index=tokenizer.pad_token_id )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks) )
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks , generated_tokens ):
                gen_token_dict[task].append(generated_tokens )
    code_gens = [[] for _ in range(n_tasks )]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s , skip_special_tokens=True , clean_up_tokenization_spaces=True )
            code_gens[task].append(remove_last_block(gen_code ) )
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    generations = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=generations, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def bamb(x):
    """Convert a byte count to MiB (2**20 bytes)."""
    return int(x / 2**20)
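# e.g. bamb(3 * 2**20) == 3 -- used below to report CUDA memory in whole MiB.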
class TorchTracemalloc:
    """Context manager tracking CUDA memory allocated between __enter__ and __exit__."""

    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    """Build train/eval dataloaders over glue/mrpc subsets of size n_train/n_val."""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    return train_dataloader, eval_dataloader
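# Example invocation (mirrors how training_function uses it below):
#
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased")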
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also controls new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
import datasets
from .evaluate import evaluate
_lowercase = "\\n@inproceedings{Rajpurkar2016SQuAD10,\n title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},\n author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},\n booktitle={EMNLP},\n year={2016}\n}\n"
_lowercase = "\nThis metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).\n\nStanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by\ncrowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,\nfrom the corresponding reading passage, or the question might be unanswerable.\n"
_lowercase = "\nComputes SQuAD scores (F1 and EM).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair as given in the references (see below)\n - 'prediction_text': the text of the answer\n references: List of question-answers dictionaries with the following key-values:\n - 'id': id of the question-answer pair (see above),\n - 'answers': a Dict in the SQuAD dataset format\n {\n 'text': list of possible texts for the answer, as a list of strings\n 'answer_start': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n 'exact_match': Exact match (the normalized answer exactly match the gold answer)\n 'f1': The F-score of predicted tokens versus the gold answer\nExamples:\n\n >>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]\n >>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]\n >>> squad_metric = datasets.load_metric(\"squad\")\n >>> results = squad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 100.0, 'f1': 100.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string'''), '''prediction_text''': datasets.Value('''string''')},
'''references''': {
'''id''': datasets.Value('''string'''),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string'''),
'''answer_start''': datasets.Value('''int32'''),
}),
},
}) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
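    # Shape sketch (illustrative values, matching the docstring example above): _compute
    # turns
    #   predictions = [{"id": "q1", "prediction_text": "1976"}]
    #   references  = [{"id": "q1", "answers": {"text": ["1976"], "answer_start": [97]}}]
    # into the nested SQuAD-v1 "dataset" layout plus a {"q1": "1976"} prediction dict
    # before delegating to the official evaluate() script.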
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class AlbertConfig(PretrainedConfig):
    model_type = "albert"

    def __init__(self, vocab_size=30000, embedding_size=128, hidden_size=4096, num_hidden_layers=12, num_hidden_groups=1, num_attention_heads=64, intermediate_size=16384, inner_group_num=1, hidden_act="gelu_new", hidden_dropout_prob=0, attention_probs_dropout_prob=0, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, classifier_dropout_prob=0.1, position_embedding_type="absolute", pad_token_id=0, bos_token_id=2, eos_token_id=3, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig):
    @property
    def inputs(self):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
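# Construction sketch (assumed usage; AlbertOnnxConfig follows the generic OnnxConfig
# constructor, so the task name here is illustrative):
#
#   config = AlbertConfig()                    # albert-xxlarge style defaults
#   onnx_config = AlbertOnnxConfig(config, task="default")
#   print(onnx_config.inputs)                  # OrderedDict with batch/sequence axes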
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)
def __call__( self ):
return self.pa_type
    def encode_example(self, value):
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example(self, value, token_per_repo_id=None):
        """Decode an example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
    def cast_storage(self, storage):
        """Cast an Arrow array to the Image arrow storage type (struct of bytes/path)."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage):
        """Embed the image bytes into the Arrow storage, dropping absolute paths."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objs_to_list_of_image_dicts(objs: list) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
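# Feature usage sketch (assumed dataset/column names, for illustration only):
#
#   from datasets import Dataset, Features
#   ds = Dataset.from_dict({"img": ["path/to/a.png"]}, features=Features({"img": Image()}))
#   ds[0]["img"]  # decoded lazily into a PIL.Image.Image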
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_3": "float64", "col_1": "string", "col_2": "int64"},
] , )
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
class TestJsonDatasetWriter:
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
@pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10
@pytest.mark.parametrize(
"orient, container, keys, len_at" , [
("records", list, {"tokens", "labels", "answers", "id"}, None),
("split", dict, {"columns", "data"}, "data"),
("index", dict, set("0123456789" ), None),
("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
("values", list, None, None),
("table", dict, {"schema", "data"}, "data"),
] , )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10
    def test_dataset_to_json_orient_invalidproc(self, dataset):
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)
@pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = str(tmp_path_factory.mktemp("data") / f"test.json.{extension}")
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
"""simple docstring"""
from collections import Counter
from timeit import timeit
def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    """Counter-based one-liner: at most one character may appear an odd number of times."""
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2
def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    """Explicit frequency-count version of the same check."""
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: stores the frequency of every character in the input string
    character_freq_dict: dict = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True
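# Quick sanity examples (illustrations of the two checks above):
#
#   can_string_be_rearranged_as_palindrome("Momo")   # True  -> "omom" / "momo"
#   can_string_be_rearranged_as_palindrome("abcab")  # True  -> "abcba" (one odd count)
#   can_string_be_rearranged_as_palindrome("abc")    # False -> three odd counts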
def benchmark(input_str: str = "") -> None:
    """Benchmark both implementations on the given string."""
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()", "\tans =", can_string_be_rearranged_as_palindrome_counter(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)", setup="import __main__ as z", ), "seconds", )
    print(
        "> can_string_be_rearranged_as_palindrome()", "\tans =", can_string_be_rearranged_as_palindrome(input_str), "\ttime =", timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)", setup="import __main__ as z", ), "seconds", )
if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Build model inputs by adding special tokens: [CLS] A [SEP] or [CLS] A [SEP] B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
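    # Layout sketch (illustrative ids): for a pair of sequences A and B, the method
    # above produces [CLS] A [SEP] B [SEP], and create_token_type_ids_from_sequences
    # below marks the first segment with 0s and the second with 1s:
    #
    #   [CLS] a1 a2 [SEP] b1 b2 [SEP]
    #     0   0  0    0   1  1    1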
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
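# Example renames produced by the pass above (checkpoint keys are illustrative):
#
#   emb.weight               -> rwkv.embeddings.weight
#   blocks.0.ln0.weight      -> rwkv.blocks.0.pre_ln.weight
#   blocks.3.att.time_mix_k  -> rwkv.blocks.3.attention.time_mix_key
#   blocks.3.ffn.time_mix_r  -> rwkv.blocks.3.feed_forward.time_mix_receptance
#   head.weight              -> head.weight (left untouched)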
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. Build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--repo_id''', default=None, type=str, required=True, help='''Repo ID from which to pull the checkpoint.'''
)
parser.add_argument(
'''--checkpoint_file''', default=None, type=str, required=True, help='''Name of the checkpoint file in the repo.'''
)
parser.add_argument(
'''--output_dir''', default=None, type=str, required=True, help='''Where to save the converted model.'''
)
parser.add_argument(
'''--tokenizer_file''',
default=None,
type=str,
help='''Path to the tokenizer file to use (if not provided, only the model is converted).''',
)
parser.add_argument(
'''--size''',
default=None,
type=str,
help='''Size of the model. Will be inferred from the `checkpoint_file` if not passed.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Push to the Hub the converted model.''',
)
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''Name of the pushed model on the Hub, including the username / organization.''',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', f'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.cross_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias'''))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.weight''', f'''decoder.layers.{i}.sa_qcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.weight''', f'''decoder.layers.{i}.sa_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qpos_proj.weight''', f'''decoder.layers.{i}.sa_qpos_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kpos_proj.weight''', f'''decoder.layers.{i}.sa_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.weight''', f'''decoder.layers.{i}.sa_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.weight''', f'''decoder.layers.{i}.ca_qcontent_proj.weight''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.weight''', f'''decoder.layers.{i}.ca_kcontent_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kpos_proj.weight''', f'''decoder.layers.{i}.ca_kpos_proj.weight''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.weight''', f'''decoder.layers.{i}.ca_v_proj.weight'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight''', f'''decoder.layers.{i}.ca_qpos_sine_proj.weight''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_qcontent_proj.bias''', f'''decoder.layers.{i}.sa_qcontent_proj.bias''')
)
rename_keys.append(
(f'''transformer.decoder.layers.{i}.sa_kcontent_proj.bias''', f'''decoder.layers.{i}.sa_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_qpos_proj.bias''', f'''decoder.layers.{i}.sa_qpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_kpos_proj.bias''', f'''decoder.layers.{i}.sa_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.sa_v_proj.bias''', f'''decoder.layers.{i}.sa_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qcontent_proj.bias''', f'''decoder.layers.{i}.ca_qcontent_proj.bias''')
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_kcontent_proj.bias''', f'''decoder.layers.{i}.ca_kcontent_proj.bias''')
)
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_kpos_proj.bias''', f'''decoder.layers.{i}.ca_kpos_proj.bias'''))
rename_keys.append((f'''transformer.decoder.layers.{i}.ca_v_proj.bias''', f'''decoder.layers.{i}.ca_v_proj.bias'''))
rename_keys.append(
(f'''transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias''', f'''decoder.layers.{i}.ca_qpos_sine_proj.bias''')
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict


def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our Conditional DETR structure.
    """
    # load default config
    config = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        config.backbone = "resnet101"
    if "dc5" in model_name:
        config.dilation = True
    is_panoptic = "panoptic" in model_name
    if is_panoptic:
        config.num_labels = 250
    else:
        config.num_labels = 91
        repo_id = "huggingface/label-files"
        filename = "coco-detection-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load image processor
    format = "coco_panoptic" if is_panoptic else "coco_detection"
    image_processor = ConditionalDetrImageProcessor(format=format)

    # prepare image
    img = prepare_img()
    encoding = image_processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    logger.info(f"Converting model {model_name}...")

    # load original model from torch hub
    conditional_detr = torch.hub.load("DeppMeng/ConditionalDETR", model_name, pretrained=True).eval()
    state_dict = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            src = "conditional_detr." + src
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict, is_panoptic=is_panoptic)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "conditional_detr.model." if is_panoptic else "model."
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith("conditional_detr")
                and not key.startswith("class_labels_classifier")
                and not key.startswith("bbox_predictor")
            ):
                # move keys that already live under "conditional_detr" below "conditional_detr.model"
                val = state_dict.pop(key)
                state_dict["conditional_detr.model" + key[len("conditional_detr") :]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key)
                state_dict["conditional_detr." + key] = val
            elif key.startswith("bbox_attention") or key.startswith("mask_head"):
                continue
            else:
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
        else:
            if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
                val = state_dict.pop(key)
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = ConditionalDetrForSegmentation(config) if is_panoptic else ConditionalDetrForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    model.push_to_hub(repo_id=model_name, organization="DepuMeng", commit_message="Add model")
    # verify our conversion
    original_outputs = conditional_detr(pixel_values)
    outputs = model(pixel_values)
    assert torch.allclose(outputs.logits, original_outputs["pred_logits"], atol=1e-4)
    assert torch.allclose(outputs.pred_boxes, original_outputs["pred_boxes"], atol=1e-4)
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks, original_outputs["pred_masks"], atol=1e-4)

    # Save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
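
# Usage sketch (added; the script filename and output folder are illustrative,
# the model name must be one of the torch.hub entries in DeppMeng/ConditionalDETR):
#
#   python convert_conditional_detr_original_pytorch_checkpoint_to_pytorch.py \
#       --model_name conditional_detr_resnet50 \
#       --pytorch_dump_folder_path ./conditional-detr-resnet-50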
| 315
| 1
|
def is_automorphic_number(number):
    """
    Check whether a number is automorphic, i.e. its square ends in the number itself.

    >>> is_automorphic_number(0)
    True
    >>> is_automorphic_number(5)
    True
    >>> is_automorphic_number(25)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
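
# Worked check (added): an automorphic number's square ends in the number itself.
if __name__ == "__main__":
    assert is_automorphic_number(5)      # 5 * 5 = 25 ends in 5
    assert is_automorphic_number(25)     # 25 * 25 = 625 ends in 25
    assert not is_automorphic_number(7)  # 7 * 7 = 49 does not end in 7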
| 167
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class DonutImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {'height': 18, 'width': 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_thumbnail'))
        self.assertTrue(hasattr(image_processing, 'do_align_long_axis'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {'height': 84, 'width': 42})

    def test_batch_feature(self):
        pass
    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size['height'],
                self.image_processor_tester.size['width'],
            ),
        )
| 3
| 0
|
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 701
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'neck_hidden_sizes'))
        self.parent.assertTrue(hasattr(config, 'num_attention_heads'))
class MobileViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act='silu',
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='MobileViT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason='MobileViT does not support input and output embeddings')
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason='MobileViT does not output attentions')
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
    def test_model_is_small(self):
        pass

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained('apple/mobilevit-xx-small') if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained('apple/mobilevit-xx-small').to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1E-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained('apple/deeplabv3-mobilevit-xx-small')
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained('apple/deeplabv3-mobilevit-xx-small')

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 679
| 0
|
"""simple docstring"""
def knapsack(weights, values, number_of_items, max_weight, index):
    """
    Recursive 0/1 knapsack: at each index, return the better of skipping the
    item and (if it fits) taking it.

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)
if __name__ == "__main__":
import doctest
doctest.testmod()
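
# Usage sketch (added): with capacity 5, taking items 0 and 2 (weights 1 + 4,
# values 5 + 8) beats every other feasible subset.
if __name__ == "__main__":
    print(knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0))  # 13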
| 363
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig(PretrainedConfig):
    model_type = "realm"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        retriever_proj_size=128,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_candidates=8,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        span_hidden_size=256,
        max_span_width=10,
        reader_layer_norm_eps=1e-3,
        reader_beam_size=5,
        reader_seq_len=320,
        num_block_records=13353718,
        searcher_beam_size=5000,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps

        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len

        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
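
# Usage sketch (added for illustration; the override values are arbitrary examples):
if __name__ == "__main__":
    config = RealmConfig(num_candidates=4, reader_beam_size=3)
    print(config.vocab_size, config.num_candidates, config.reader_beam_size)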
| 277
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 136
| 0
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler(SchedulerMixin, ConfigMixin):
    """
    The variance-preserving stochastic differential equation (SDE) scheduler.
    """

    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2_000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device=None):
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute
        dt = -1.0 / len(self.timesteps)

        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x

        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise

        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
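
# Usage sketch (added, illustrative only): the reverse-SDE sampling loop this
# scheduler supports. `score_model` is a hypothetical stand-in for a trained
# score network, so the loop is left as comments rather than executable code.
#
#   scheduler = ScoreSdeVpScheduler()
#   scheduler.set_timesteps(num_inference_steps=1000, device="cpu")
#   x = torch.randn(1, 3, 32, 32)
#   for t in scheduler.timesteps:
#       score = score_model(x, t)
#       x, x_mean = scheduler.step_pred(score, x, t)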
| 267
|
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp algorithm: compare rolling hashes of the pattern and each
    text window, and only compare characters on a hash match.
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
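
# Rolling-hash check (added): sliding the window by one character removes the
# leading character's contribution and appends the next one, so each update is
# O(1). The small demo below recomputes the hash of "bcd" both ways.
if __name__ == "__main__":
    h = 0
    for ch in "abc":
        h = (ord(ch) + h * alphabet_size) % modulus
    # slide from "abc" to "bcd": drop 'a', append 'd'
    h = ((h - ord("a") * pow(alphabet_size, 2, modulus)) * alphabet_size + ord("d")) % modulus
    h_direct = 0
    for ch in "bcd":
        h_direct = (ord(ch) + h_direct * alphabet_size) % modulus
    assert h == h_direct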
| 267
| 1
|
"""simple docstring"""
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1):
    """
    Build and simulate a quantum full adder: qubits 0-2 hold the two inputs and
    the carry-in, qubit 3 collects the carry-out, and the two measured bits are
    (carry_out, sum). An input of 2 prepares that qubit in superposition.
    """
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError("""inputs must be integers.""")

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("""inputs must be positive.""")

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("""inputs must be exact integers.""")

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("""inputs must be less or equal to 2.""")

    # build registers
    qr = qiskit.QuantumRegister(4, """qr""")
    cr = qiskit.ClassicalRegister(2, """cr""")
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)

    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend("""aer_simulator""")
    job = qiskit.execute(quantum_circuit, backend, shots=10_00)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
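
# Interpretation note (added): the two measured classical bits are read back as
# (carry_out, sum), so for inputs 1, 1, 1 the counts concentrate on '11'
# (1 + 1 + 1 = 3 = 0b11). An input of 2 puts that qubit in superposition via a
# Hadamard, spreading the counts across several outcomes.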
| 393
|
"""simple docstring"""
import os
def solution(filename: str = "input.txt") -> int:
    """
    Find the minimal path sum through the matrix, moving only right, up and
    down, starting in any cell of the first column and finishing in any cell
    of the last column.
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as input_file:
        matrix = [
            [int(element) for element in line.split(""",""")]
            for line in input_file.readlines()
        ]

    rows = len(matrix)
    cols = len(matrix[0])

    minimal_path_sums = [[-1 for _ in range(cols)] for _ in range(rows)]
    for i in range(rows):
        minimal_path_sums[i][0] = matrix[i][0]

    for j in range(1, cols):
        for i in range(rows):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]

        for i in range(1, rows):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i - 1][j] + matrix[i][j]
            )

        for i in range(rows - 2, -1, -1):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j], minimal_path_sums[i + 1][j] + matrix[i][j]
            )

    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums)
if __name__ == "__main__":
print(f'{solution() = }')
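
# How the column sweep works (added): column j is first seeded with the
# "enter from the left" sums, then a top-down pass and a bottom-up pass relax
# each cell against its vertical neighbour, which covers every right/up/down
# path. For the matrix [[1, 2], [4, 1]] this gives a minimal path sum of
# 3 (the path 1 -> 2).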
| 393
| 1
|
'''simple docstring'''
def get_data(source_data: list) -> list:
    """Convert rows of records into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list, weights: list) -> list:
    """Min-max normalize every column; weight 0 inverts the score (lower is better)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f'''Invalid weight of {weight:f} provided'''
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list) -> list:
    """Sum the per-column scores for every row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list, weights: list) -> list:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)
    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
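
# Usage sketch (added): three rows of [price, rating] scored with weight 0
# ("lower is better") for price and weight 1 ("higher is better") for rating;
# each row gains its combined score as a trailing element.
if __name__ == "__main__":
    rows = [[20, 60], [25, 90], [30, 25]]
    print(procentual_proximity(rows, [0, 1]))  # first row scores 1.0 + ~0.54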
| 597
|
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = 'scheduler_config.json'
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path=None, subfolder=None, return_unused_kwargs=False, **kwargs):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, """create_state""") and getattr(scheduler, """has_state""", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(""".""")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.9_99, dtype=jnp.floataa) -> jnp.ndarray:
    def alpha_bar(time_step):
        return math.cos((time_step + 0.0_08) / 1.0_08 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}'''
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
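
# Usage sketch (added, illustrative): `CommonSchedulerState.create` only needs an
# object exposing `.config` (with the beta fields) and `.dtype`, so a faked
# minimal scheduler is enough to demo the forward-noising helper.
if __name__ == "__main__":
    from types import SimpleNamespace

    fake_scheduler = SimpleNamespace(
        config=SimpleNamespace(
            trained_betas=None, beta_schedule="linear", beta_start=1e-4, beta_end=2e-2, num_train_timesteps=10
        ),
        dtype=jnp.float32,
    )
    state = CommonSchedulerState.create(fake_scheduler)
    sample = jnp.ones((1, 4))
    noise = jnp.zeros((1, 4))
    timesteps = jnp.array([3])
    print(add_noise_common(state, sample, noise, timesteps))  # sample scaled by sqrt(alpha_bar_3)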
| 523
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) ->List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"""Could not make batched video from {videos}""")
class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=None,
        do_rescale=True,
        rescale_factor=1 / 2_55,
        offset=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'''shortest_edge''': 2_56}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PILImageResampling.BILINEAR, data_format=None, **kwargs):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['''shortest_edge'''], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['''height'''], size['''width'''])
        else:
            raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""")
        return center_crop(image, size=(size['''height'''], size['''width''']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, offset=True, data_format=None, **kwargs):
        image = image.astype(np.floataa)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        data_format=ChannelDimension.FIRST,
    ):
        if do_resize and size is None or resample is None:
            raise ValueError('''Size and resample must be specified if do_resize is True.''')

        if do_center_crop and crop_size is None:
            raise ValueError('''Crop size must be specified if do_center_crop is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''')

        if offset and not do_rescale:
            raise ValueError('''For offset, do_rescale must also be set to True.''')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        offset=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='''crop_size''')

        if not valid_images(videos):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.'''
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {'''pixel_values''': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Optional[Any] = {
"""asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'sew-d'
def __init__(self , a_=32 , a_=7_68 , a_=12 , a_=12 , a_=30_72 , a_=2 , a_=5_12 , a_=2_56 , a_=True , a_=True , a_=("p2c", "c2p") , a_="layer_norm" , a_="gelu_python" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.1 , a_=0.02 , a_=1E-7 , a_=1E-5 , a_="group" , a_="gelu" , a_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , a_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , a_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , a_=False , a_=1_28 , a_=16 , a_=True , a_=0.05 , a_=10 , a_=2 , a_=0.0 , a_=10 , a_=0 , a_="mean" , a_=False , a_=False , a_=2_56 , a_=0 , a_=1 , a_=2 , **a_ , ):
'''simple docstring'''
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
__snake_case : Any = hidden_size
__snake_case : Tuple = feat_extract_norm
__snake_case : int = feat_extract_activation
__snake_case : List[str] = list(a_ )
__snake_case : Optional[Any] = list(a_ )
__snake_case : List[str] = list(a_ )
__snake_case : List[str] = conv_bias
__snake_case : Dict = num_conv_pos_embeddings
__snake_case : str = num_conv_pos_embedding_groups
__snake_case : int = len(self.conv_dim )
__snake_case : List[Any] = num_hidden_layers
__snake_case : List[Any] = intermediate_size
__snake_case : Dict = squeeze_factor
__snake_case : Optional[int] = max_position_embeddings
__snake_case : List[Any] = position_buckets
__snake_case : Union[str, Any] = share_att_key
__snake_case : Tuple = relative_attention
__snake_case : str = norm_rel_ebd
__snake_case : Tuple = list(a_ )
__snake_case : Optional[int] = hidden_act
__snake_case : int = num_attention_heads
__snake_case : Optional[Any] = hidden_dropout
__snake_case : Union[str, Any] = attention_dropout
__snake_case : Any = activation_dropout
__snake_case : Tuple = feat_proj_dropout
__snake_case : str = final_dropout
__snake_case : str = layer_norm_eps
__snake_case : Tuple = feature_layer_norm_eps
__snake_case : Tuple = initializer_range
__snake_case : int = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'''Configuration for convolutional layers is incorrect.'''
'''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,'''
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Union[str, Any] = apply_spec_augment
__snake_case : str = mask_time_prob
__snake_case : Optional[Any] = mask_time_length
__snake_case : List[Any] = mask_time_min_masks
__snake_case : str = mask_feature_prob
__snake_case : List[str] = mask_feature_length
__snake_case : Optional[int] = mask_feature_min_masks
# ctc loss
__snake_case : Union[str, Any] = ctc_loss_reduction
__snake_case : Optional[Any] = ctc_zero_infinity
# sequence classification
__snake_case : str = use_weighted_layer_sum
__snake_case : Any = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        """Overall downsampling factor of the convolutional feature encoder."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
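# A quick check of the `inputs_to_logits_ratio` property above (a sketch):
# with the default strides the feature encoder downsamples by 5 * 2**6 = 320,
# i.e. one encoder frame per 320 input samples (~20 ms of 16 kHz audio).
if __name__ == "__main__":
    default_strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    assert functools.reduce(operator.mul, default_strides, 1) == 320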
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
        '''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''xlnet-base-cased''': None,
    '''xlnet-large-cased''': None,
}
# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = """left"""
def __init__( self , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<sep>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<cls>" , _lowerCamelCase="<mask>" , _lowerCamelCase=["<eop>", "<eod>"] , _lowerCamelCase = None , **_lowerCamelCase , ) -> None:
'''simple docstring'''
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
__lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_lowerCamelCase , remove_space=_lowerCamelCase , keep_accents=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__lowercase = 3
__lowercase = do_lower_case
__lowercase = remove_space
__lowercase = keep_accents
__lowercase = vocab_file
__lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
    @property
    def vocab_size( self ) -> int:
        return len(self.sp_model )
    def get_vocab( self ) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def preprocess_text( self , inputs ) -> str:
        if self.remove_space:
            outputs = " ".join(inputs.strip().split() )
        else:
            outputs = inputs
        outputs = outputs.replace("``" , "\"" ).replace("''" , "\"" )
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD" , outputs )
            outputs = "".join([c for c in outputs if not unicodedata.combining(c )] )
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = self.preprocess_text(_lowerCamelCase )
__lowercase = self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
__lowercase = []
for piece in pieces:
if len(_lowerCamelCase ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
__lowercase = self.sp_model.EncodeAsPieces(piece[:-1].replace(_lowerCamelCase , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__lowercase = cur_pieces[1:]
else:
__lowercase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_lowerCamelCase )
else:
new_pieces.append(_lowerCamelCase )
return new_pieces
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
return self.sp_model.PieceToId(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> int:
'''simple docstring'''
return self.sp_model.IdToPiece(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = "".join(_lowerCamelCase ).replace(_lowerCamelCase , " " ).strip()
return out_string
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = True , **_lowerCamelCase , ) -> str:
'''simple docstring'''
__lowercase = kwargs.pop("use_source_tokenizer" , _lowerCamelCase )
__lowercase = self.convert_ids_to_tokens(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
        # To avoid mixing byte-level and unicode for byte-level BPE
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
__lowercase = []
__lowercase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
__lowercase = []
sub_texts.append(_lowerCamelCase )
else:
current_sub_text.append(_lowerCamelCase )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(_lowerCamelCase ) )
# Mimic the behavior of the Rust tokenizer:
# By default, there are no spaces between special tokens
__lowercase = "".join(_lowerCamelCase )
__lowercase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
__lowercase = self.clean_up_tokenization(_lowerCamelCase )
return clean_text
else:
return text
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is not None:
return ([0] * len(_lowerCamelCase )) + [1] + ([0] * len(_lowerCamelCase )) + [1, 1]
return ([0] * len(_lowerCamelCase )) + [1, 1]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , "wb" ) as fi:
__lowercase = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
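# Layout sketch for the sequence-pair methods above: XLNet appends its special
# tokens, so a pair (A, B) becomes `A <sep> B <sep> <cls>`, with token type ids
# 0 for A, 1 for B and 2 for the trailing <cls>. The ids below are hypothetical.
if __name__ == "__main__":
    ids_a, ids_b, sep, cls = [10, 11], [20], [5], [3]
    sequence = ids_a + sep + ids_b + sep + cls
    segments = len(ids_a + sep) * [0] + len(ids_b + sep) * [1] + [2]
    assert len(sequence) == len(segments) == 6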
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
    '''tokenizer_config_file''': '''tokenizer_config.json''',
}
PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'''},
    '''merges_file''': {'''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'''},
    '''tokenizer_config_file''': {
        '''facebook/blenderbot-3B''': '''https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'''
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/blenderbot-3B''': 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode() -> dict:
    # Map every byte to a printable unicode character so BPE never sees raw
    # control bytes or whitespace.
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def get_pairs(word) -> set:
    # Return the set of adjacent symbol pairs in `word` (a tuple of symbols).
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
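# Tiny illustration of the helpers above (a sketch):
#   bytes_to_unicode() is a reversible byte -> printable-character map, e.g.
#   len(bytes_to_unicode()) == 256 and bytes_to_unicode()[ord("A")] == "A";
#   get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")},
#   i.e. the adjacent symbol pairs the BPE loop ranks and merges.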
class BlenderbotTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
def __init__( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="replace" , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase="<mask>" , _lowerCamelCase=False , **_lowerCamelCase , ) -> int:
'''simple docstring'''
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else bos_token
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else eos_token
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else sep_token
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else cls_token
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else unk_token
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowercase = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
super().__init__(
errors=_lowerCamelCase , bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , add_prefix_space=_lowerCamelCase , **_lowerCamelCase , )
with open(_lowerCamelCase , encoding="utf-8" ) as vocab_handle:
__lowercase = json.load(_lowerCamelCase )
__lowercase = {v: k for k, v in self.encoder.items()}
__lowercase = errors # how to handle errors in decoding
__lowercase = bytes_to_unicode()
__lowercase = {v: k for k, v in self.byte_encoder.items()}
with open(_lowerCamelCase , encoding="utf-8" ) as merges_handle:
__lowercase = merges_handle.read().split("\n" )[1:-1]
__lowercase = [tuple(merge.split() ) for merge in bpe_merges]
__lowercase = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
__lowercase = {}
__lowercase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowercase = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> int:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowercase = tuple(_lowerCamelCase )
__lowercase = get_pairs(_lowerCamelCase )
if not pairs:
return token
while True:
__lowercase = min(_lowerCamelCase , key=lambda _lowerCamelCase : self.bpe_ranks.get(_lowerCamelCase , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
__lowercase , __lowercase = bigram
__lowercase = []
__lowercase = 0
while i < len(_lowerCamelCase ):
try:
__lowercase = word.index(_lowerCamelCase , _lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowercase = j
if word[i] == first and i < len(_lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowercase = tuple(_lowerCamelCase )
__lowercase = new_word
if len(_lowerCamelCase ) == 1:
break
else:
__lowercase = get_pairs(_lowerCamelCase )
__lowercase = " ".join(_lowerCamelCase )
__lowercase = word
return word
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = []
for token in re.findall(self.pat , _lowerCamelCase ):
__lowercase = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowerCamelCase ).split(" " ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> Tuple:
'''simple docstring'''
return self.encoder.get(_lowerCamelCase , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> str:
'''simple docstring'''
return self.decoder.get(_lowerCamelCase )
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[str]:
'''simple docstring'''
__lowercase = "".join(_lowerCamelCase )
__lowercase = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
__lowercase = os.path.join(
_lowerCamelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCamelCase , ensure_ascii=_lowerCamelCase ) + "\n" )
__lowercase = 0
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
__lowercase = token_index
writer.write(" ".join(_lowerCamelCase ) + "\n" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> List[int]:
'''simple docstring'''
__lowercase = [self.sep_token_id]
__lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase=False , **_lowerCamelCase ) -> int:
'''simple docstring'''
__lowercase = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_lowerCamelCase ) > 0 and not text[0].isspace()):
__lowercase = " " + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = None ) -> Tuple:
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase ) -> List[int]:
'''simple docstring'''
__lowercase = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
inputs.append(_lowerCamelCase )
__lowercase = " ".join(_lowerCamelCase )
__lowercase = self.encode(_lowerCamelCase )
if len(_lowerCamelCase ) > self.model_max_length:
__lowercase = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
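# Trimming sketch for the conversation builder above: once the encoded
# conversation exceeds `model_max_length`, only the most recent tokens survive.
if __name__ == "__main__":
    input_ids = list(range(10))  # hypothetical encoded conversation
    model_max_length = 6
    assert input_ids[-model_max_length:] == [4, 5, 6, 7, 8, 9]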
class Graph:
    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex) -> None:
        """Add a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight) -> None:
        """Add an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self) -> None:
        """Make all edge weights distinct (Borůvka's algorithm assumes unique weights)."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])
        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self) -> list:
        """Return all edges as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Build a graph from lists of vertices and (head, tail, weight) edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set structure with union by rank and path compression."""

        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self) -> int:
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)
            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)
            if root1 == root2:
                return root1
            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1
            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2
            # Equal ranks: attach root2 under root1 and bump root1's rank.
            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka_mst(graph):
        """Compute a minimum spanning tree with Borůvka's algorithm."""
        num_components = graph.num_vertices
        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1
            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]
                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
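# Usage sketch for the class above: build a small weighted graph, make the
# weights distinct (Borůvka assumes unique weights), then extract an MST.
if __name__ == "__main__":
    g = Graph.build(edges=[(1, 2, 1), (2, 3, 2), (3, 1, 3), (3, 4, 4)])
    g.distinct_weight()
    mst = Graph.boruvka_mst(g)
    print(mst)  # MST edges, printed once per direction, e.g. "2 -> 1 == 1"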
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__a : Optional[int] = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
        ),
    },
    """tokenizer_file""": {
        """google/bigbird-roberta-base""": (
            """https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-roberta-large""": (
            """https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"""
        ),
        """google/bigbird-base-trivia-itc""": (
            """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"""
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/bigbird-roberta-base""": 4096,
    """google/bigbird-roberta-large""": 4096,
    """google/bigbird-base-trivia-itc""": 4096,
}
SPIECE_UNDERLINE = """▁"""
class BigBirdTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
def __init__( self : Optional[Any] , __UpperCAmelCase : Optional[Any]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : Optional[Any]="<unk>" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : Optional[Any]="</s>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : Dict="[SEP]" , __UpperCAmelCase : Optional[Any]="[MASK]" , __UpperCAmelCase : List[str]="[CLS]" , **__UpperCAmelCase : str , ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else bos_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else eos_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else unk_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else pad_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else cls_token
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCamelCase_ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
UpperCamelCase_ = vocab_file
UpperCamelCase_ = False if not self.vocab_file else True
def lowercase__ ( self : Tuple , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowercase__ ( self : Union[str, Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
def lowercase__ ( self : Any , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase_ = [self.sep_token_id]
UpperCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Dict , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(__UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ = os.path.join(
__UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
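# Segment-id sketch for the token-type method above (BigBird follows the BERT
# convention: `[CLS] A [SEP]` gets segment 0, `B [SEP]` gets segment 1).
if __name__ == "__main__":
    ids_a, ids_b, sep, cls = [7, 8], [9], [66], [65]  # hypothetical token ids
    assert len(cls + ids_a + sep) * [0] + len(ids_b + sep) * [1] == [0, 0, 0, 0, 1, 1]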
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["""pixel_values"""]

    def __init__(self, do_resize: bool = True, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, **kwargs) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(self, image: np.ndarray, size_divisor: int, resample, data_format=None, **kwargs) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(self, image: np.ndarray, scale: float, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(self, images, do_resize=None, size_divisor=None, resample=None, do_rescale=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs):
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample
        if do_resize and size_divisor is None:
            raise ValueError("""size_divisor is required for resizing""" )
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError("""Invalid image(s)""" )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]
        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
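# Rounding sketch for the resize above: height and width are floored to the
# nearest multiple of `size_divisor` (32 by default).
if __name__ == "__main__":
    height, width, size_divisor = 480, 643, 32
    assert (height // size_divisor * size_divisor, width // size_divisor * size_divisor) == (480, 640)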
def hamming_distance(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    >>> hamming_distance("python", "python")
    0
    >>> hamming_distance("karolin", "kathrin")
    3
    """
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
"""simple docstring"""
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
logger = logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer(Trainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, quant_trainer_args=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
        self.quant_trainer_args = quant_trainer_args
        self.calib_num = 128  # default number of calibration samples
    def get_calib_dataloader(self, calib_dataset=None):
        if calib_dataset is None and self.calib_dataset is None:
            raise ValueError('Trainer: calibration requires a calib_dataset.' )
        calib_dataset = calib_dataset if calib_dataset is not None else self.calib_dataset
        calib_dataset = self._remove_unused_columns(calib_dataset , description='Calibration' )
        return DataLoader(
            calib_dataset , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=True , )
    def calibrate(self, calib_dataset=None):
        calib_dataset = self.train_dataset if calib_dataset is None else calib_dataset
        calib_dataloader = self.get_calib_dataloader(calib_dataset )
        model = self.model
        quant_trainer.configure_model(model , self.quant_trainer_args , calib=True )
        model.eval()
        quant_trainer.enable_calibration(model )
        logger.info('***** Running calibration *****' )
        logger.info(F""" Num examples = {self.calib_num}""" )
        logger.info(F""" Batch size = {calib_dataloader.batch_size}""" )
        for step, inputs in enumerate(calib_dataloader ):
            # Prediction step
            loss, logits, labels = self.prediction_step(model , inputs , prediction_loss_only=False )
            if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
                break
        quant_trainer.finish_calibration(model , self.quant_trainer_args )
        self.model = model
    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix: str = "eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is not None and self.compute_metrics is not None:
            eval_preds = self.post_process_function(eval_examples , eval_dataset , output.predictions )
            metrics = self.compute_metrics(eval_preds )
            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys() ):
                if not key.startswith(F"""{metric_key_prefix}_""" ):
                    metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
            self.log(metrics )
        else:
            metrics = {}
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report() )
        self.control = self.callback_handler.on_evaluate(self.args , self.state , self.control , metrics )
        return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset )
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=ignore_keys , )
        finally:
            self.compute_metrics = compute_metrics
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples , predict_dataset , output.predictions , 'predict' )
        metrics = self.compute_metrics(predictions )
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys() ):
            if not key.startswith(F"""{metric_key_prefix}_""" ):
                metrics[F"""{metric_key_prefix}_{key}"""] = metrics.pop(key )
        return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=metrics )
    def save_onnx(self, output_dir="./"):
        eval_dataset = self.eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset )
        batch = next(iter(eval_dataloader ) )
        # saving device - to make it consistent
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
        # convert to tuple
        input_tuple = tuple(v.to(device ) for k, v in batch.items() )
        logger.info('Converting model to be onnx compatible' )
        from pytorch_quantization.nn import TensorQuantizer
        TensorQuantizer.use_fb_fake_quant = True
        model = self.model.to(device )
        model.eval()
        model.float()
        model_to_save = model.module if hasattr(model , 'module' ) else model
        quant_trainer.configure_model(model_to_save , self.quant_trainer_args )
        output_model_file = os.path.join(output_dir , 'model.onnx' )
        logger.info(F"""exporting model to {output_model_file}""" )
        axes = {0: 'batch_size', 1: 'seq_len'}
        torch.onnx.export(
            model_to_save , input_tuple , output_model_file , export_params=True , opset_version=13 , do_constant_folding=True , input_names=['input_ids', 'attention_mask', 'token_type_ids'] , output_names=['output_start_logits', 'output_end_logits'] , dynamic_axes={
                'input_ids': axes,
                'attention_mask': axes,
                'token_type_ids': axes,
                'output_start_logits': axes,
                'output_end_logits': axes,
            } , verbose=True , )
        logger.info('onnx export finished' )
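# Usage sketch (hypothetical wiring, mirroring the methods above):
#   trainer = QuestionAnsweringTrainer(model=model, args=training_args, ...,
#                                      quant_trainer_args=quant_trainer_args)
#   trainer.calibrate()           # streams ~trainer.calib_num samples through the model
#   metrics = trainer.evaluate()  # eval loop + post-processing + metrics
#   trainer.save_onnx("./")       # exports the calibrated model to ./model.onnx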
"""simple docstring"""
from math import ceil
def solution(n: int = 1001) -> int:
    """Sum of the numbers on the diagonals of an n x n number spiral."""
    total = 1
    for i in range(1, int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        # Each ring's four corners sum to 4*odd**2 - 6*even.
        total = total + 4 * odd**2 - 6 * even
    return total
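# Worked check (a sketch): the diagonals of a 5x5 number spiral are
# 1, 3, 5, 7, 9, 13, 17, 21, 25, which sum to 101, so solution(5) == 101.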
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('Invalid entry - please enter a number')
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class CLIPImageProcessor(BaseImageProcessor):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
        output_size = get_resize_output_image_size(image, size=size['shortest_edge'], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name='size', default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size', default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.' )
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
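# Usage sketch (mirrors the preprocess() flow above; inputs are hypothetical):
#   processor = CLIPImageProcessor()                       # shortest_edge=224, 224x224 center crop
#   batch = processor.preprocess(images=image, return_tensors="np")
#   batch["pixel_values"].shape == (1, 3, 224, 224)        # resize -> crop -> rescale -> normalize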
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue' , 'mrpc')
    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None)
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset,
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels', which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels')
    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
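            # (Hedged explanatory note, based on the documented behavior of
            # `Accelerator.accumulate`: on non-boundary micro-steps gradient
            # synchronization is skipped and the wrapped optimizer/scheduler calls
            # below are effectively no-ops, so parameters only update once every
            # `gradient_accumulation_steps` batches.)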
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 23
| 1
|
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
| 719
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
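# A minimal usage sketch (hedged: this assumes the formatter is registered under the
# "jax" format name, as it is in `datasets`, and that `ds` is a `datasets.Dataset`):
#
#   import jax
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   batch = ds[:2]  # column values now come back as `jax.Array`s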
| 672
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
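# Note: the `sys.modules` swap above is the standard `transformers` lazy-import
# pattern: importing this package stays cheap, and the heavy torch-backed modules
# listed in `_import_structure` are only imported when one of their attributes is
# actually accessed.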
| 72
|
'''simple docstring'''
import unicodedata
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
from transformers.data.data_collator import DataCollatorMixin
from transformers.file_utils import PaddingStrategy
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
def padding_tensor(sequences, padding_value, padding_side, sequence_length):
    """Pad a list of variable-length sequences to `sequence_length` with `padding_value`.

    The left-padding slice below is a reconstruction of the garbled original: all four
    branches had collapsed to the same assignment, so the out_tensor targets are restored
    from the right/left padding-side semantics.
    """
    if isinstance(padding_value, tuple):
        out_tensor = np.full((len(sequences), sequence_length, 2), padding_value)
    else:
        out_tensor = np.full((len(sequences), sequence_length), padding_value)

    for i, tensor in enumerate(sequences):
        if padding_side == "right":
            if isinstance(padding_value, tuple):
                out_tensor[i, : len(tensor[:sequence_length]), :2] = tensor[:sequence_length]
            else:
                out_tensor[i, : len(tensor[:sequence_length])] = tensor[:sequence_length]
        else:
            if isinstance(padding_value, tuple):
                out_tensor[i, -len(tensor[:sequence_length]) :, :2] = tensor[:sequence_length]
            else:
                out_tensor[i, -len(tensor[:sequence_length]) :] = tensor[:sequence_length]

    return out_tensor.tolist()
def is_punctuation(char):
    cp = ord(char)
    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
        return True
    cat = unicodedata.category(char)
    if cat.startswith("P"):
        return True
    return False
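# For example: is_punctuation("!") is True via the ASCII ranges above,
# is_punctuation("a") is False, and a curly quote like "”" is True via the
# Unicode "P*" category check.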
@dataclass
class DataCollatorForLukeTokenClassification(DataCollatorMixin):
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = -100
    return_tensors: str = "pt"

    def torch_call(self, features):
        import torch

        label_name = "label" if "label" in features[0].keys() else "labels"
        labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None
        batch = self.tokenizer.pad(
            features,
            padding=self.padding,
            max_length=self.max_length,
            pad_to_multiple_of=self.pad_to_multiple_of,
            # Conversion to tensors will fail if we have labels as they are not of the same length yet.
            return_tensors="pt" if labels is None else None,
        )

        if labels is None:
            return batch

        sequence_length = torch.tensor(batch["entity_ids"]).shape[1]
        padding_side = self.tokenizer.padding_side
        if padding_side == "right":
            batch[label_name] = [
                list(label) + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels
            ]
        else:
            batch[label_name] = [
                [self.label_pad_token_id] * (sequence_length - len(label)) + list(label) for label in labels
            ]

        ner_tags = [feature["ner_tags"] for feature in features]
        batch["ner_tags"] = padding_tensor(ner_tags, -1, padding_side, sequence_length)
        original_entity_spans = [feature["original_entity_spans"] for feature in features]
        batch["original_entity_spans"] = padding_tensor(original_entity_spans, (-1, -1), padding_side, sequence_length)
        batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()}

        return batch
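# A minimal usage sketch (hedged: `tokenizer` and `features` are placeholders; real
# feature dicts come from the LUKE NER preprocessing and include "input_ids",
# "labels", "entity_ids", "ner_tags" and "original_entity_spans"):
#
#   collator = DataCollatorForLukeTokenClassification(tokenizer=tokenizer)
#   batch = collator(features)  # -> dict of padded torch.int64 tensors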
| 72
| 1
|
'''simple docstring'''
UNIVERSAL_GAS_CONSTANT = 8.3144598  # J / (mol * K)


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    """
    Return the root-mean-square speed (in m/s) of a gas molecule, given the
    absolute temperature (in K) and the molar mass (in kg/mol):
    v_rms = sqrt(3 * R * T / M)

    >>> round(rms_speed_of_molecule(100, 2), 4)
    35.3153
    """
    if temperature < 0:
        raise Exception("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise Exception("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
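# Derivation sketch (standard kinetic theory, independent of this script): the mean
# translational kinetic energy of one mole of ideal gas is (3/2) * R * T; equating it
# to (1/2) * M * v_rms**2 and solving gives v_rms = sqrt(3 * R * T / M).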
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # K
    molar_mass = 0.028  # kg/mol for N2 (the molar mass must be in kg/mol to match the constant's units)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 701
|
'''simple docstring'''
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
    PIL_INTERPOLATION = {
        "linear": PIL.Image.Resampling.BILINEAR,
        "bilinear": PIL.Image.Resampling.BILINEAR,
        "bicubic": PIL.Image.Resampling.BICUBIC,
        "lanczos": PIL.Image.Resampling.LANCZOS,
        "nearest": PIL.Image.Resampling.NEAREST,
    }
else:
    PIL_INTERPOLATION = {
        "linear": PIL.Image.LINEAR,
        "bilinear": PIL.Image.BILINEAR,
        "bicubic": PIL.Image.BICUBIC,
        "lanczos": PIL.Image.LANCZOS,
        "nearest": PIL.Image.NEAREST,
    }
def pt_to_pil(images):
    """Convert a torch image tensor (NCHW, values in [-1, 1]) to a list of PIL images."""
    images = (images / 2 + 0.5).clamp(0, 1)
    images = images.cpu().permute(0, 2, 3, 1).float().numpy()
    images = numpy_to_pil(images)
    return images


def numpy_to_pil(images):
    """Convert a numpy image batch (NHWC, values in [0, 1]) to a list of PIL images."""
    if images.ndim == 3:
        images = images[None, ...]
    images = (images * 255).round().astype("uint8")
    if images.shape[-1] == 1:
        # special case for grayscale (single channel) images
        pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
    else:
        pil_images = [Image.fromarray(image) for image in images]

    return pil_images
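# A minimal usage sketch (hedged: shapes are illustrative only):
#
#   import numpy as np
#   batch = np.random.rand(2, 64, 64, 3)  # NHWC floats in [0, 1]
#   pils = numpy_to_pil(batch)            # -> list of two PIL.Image.Image objects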
| 352
| 0
|
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

        with TemporaryDirectory() as bert_save_dir:
            model = BertModel(BertConfig(vocab_size=len(vocab)))
            model.save_pretrained(bert_save_dir)
            self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # All generated args are valid
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameter should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with another args (for instance parameter "past" in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly the one arg (all before the one not provided "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_name(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 107
|
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__SCREAMING_SNAKE_CASE =False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )

        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt",
            image=prompt_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=2,
            output_type="numpy",
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt,
            image=init_image,
            text_to_image_strength=0.75,
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=50,
            output_type="numpy",
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
| 425
| 0
|
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    # The target keys below follow the diffusers AutoencoderKL naming convention
    # (reconstructed from the garbled assignments; the source keys were preserved).
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
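# Example invocation (hedged: the script filename is an assumed save name and the
# paths are placeholders; the two flags are the ones defined by the argparse below):
#
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.ckpt --dump_path ./vae_diffusers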
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")

    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 500
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False

    def setUp(self):
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_binary_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs)

    def test_image_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs)

    @unittest.skip("SegFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_attentions = False
            config.output_hidden_states = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions

            expected_num_attentions = sum(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )

            # verify the last attentions (last block, last layer)
            expected_seq_len = (self.model_tester.image_size // 32) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:]),
                [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len],
            )

            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            inputs_dict["output_hidden_states"] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 1, len(outputs))

            self_attentions = outputs.attentions
            self.assertEqual(len(self_attentions), expected_num_attentions)

            # verify the first attentions (first block, first layer)
            expected_seq_len = (self.model_tester.image_size // 4) ** 2
            expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len],
            )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = self.model_tester.num_encoder_blocks
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]),
                [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SegformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_image_segmentation_ade(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_image_segmentation_city(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024"
        ).to(torch_device)

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        expected_shape = torch.Size((1, model.config.num_labels, 128, 128))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1))

    @slow
    def test_post_processing_semantic_segmentation(self):
        # only resize + normalize
        image_processor = SegformerImageProcessor(
            image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
        )
        model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to(
            torch_device
        )

        image = prepare_img()
        encoded_inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = encoded_inputs.pixel_values.to(torch_device)

        with torch.no_grad():
            outputs = model(pixel_values)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((128, 128))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_wav2vec2"] = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_wav2vec2"] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_wav2vec2"] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
    from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
    from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
    from .processing_wav2vec2 import Wav2Vec2Processor
    from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_wav2vec2 import (
            WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Wav2Vec2ForAudioFrameClassification,
            Wav2Vec2ForCTC,
            Wav2Vec2ForMaskedLM,
            Wav2Vec2ForPreTraining,
            Wav2Vec2ForSequenceClassification,
            Wav2Vec2ForXVector,
            Wav2Vec2Model,
            Wav2Vec2PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_wav2vec2 import (
            TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFWav2Vec2ForCTC,
            TFWav2Vec2ForSequenceClassification,
            TFWav2Vec2Model,
            TFWav2Vec2PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_wav2vec2 import (
            FlaxWav2Vec2ForCTC,
            FlaxWav2Vec2ForPreTraining,
            FlaxWav2Vec2Model,
            FlaxWav2Vec2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 660
| 0
|
'''simple docstring'''
from datetime import datetime
import requests
from bs4 import BeautifulSoup

if __name__ == "__main__":
    url = input("Enter image url: ").strip()
    print(f"Downloading image from {url} ...")
    soup = BeautifulSoup(requests.get(url).content, "html.parser")
    # The image URL is in the content field of the first meta tag with property og:image
    image_url = soup.find("meta", {"property": "og:image"})["content"]
    image_data = requests.get(image_url).content
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.jpg"
    with open(file_name, "wb") as fp:
fp.write(image_data)
print(F"""Done. Image saved to disk as {file_name}.""")
| 145
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 145
| 1
|
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    # color the graph greedily with two colors, component by component
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    # the graph is bipartite iff no edge connects two same-colored vertices
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True


# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
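# For contrast, a graph with an odd cycle can never be two-colored, so the check
# correctly reports a triangle as non-bipartite:
# print(check_bipartite_dfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}))  # False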
| 274
|
import os
import tempfile
import unittest
from pathlib import Path

from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device


if is_torch_available():
    from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments


@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)

    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1],
            multi_process=False, only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, torchscript=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, fp16=True, sequence_lengths=[8],
            batch_sizes=[1], multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1],
            fp16=True, multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID], training=True, inference=False, sequence_lengths=[8], batch_sizes=[1], multi_process=False
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())

    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
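
# Hedged standalone sketch (not part of the original test suite): the benchmark
# can also be driven outside unittest with the same arguments the tests exercise.
if __name__ == "__main__":
    args = PyTorchBenchmarkArguments(
        models=["sshleifer/tiny-gpt2"], training=False, inference=True, sequence_lengths=[8], batch_sizes=[1], multi_process=False
    )
    print(PyTorchBenchmark(args).run())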
import importlib
import shutil
import threading
import warnings
from typing import List

import fsspec
import fsspec.asyn

from . import compression
from .hffilesystem import HfFileSystem


_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[compression.BaseCompressedFileFileSystem] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]

# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(f"A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)


def extract_path_from_uri(dataset_path: str) -> str:
    """Strip the protocol prefix (e.g. `s3://`) from a dataset path, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path


def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Return True if `fs` is anything other than the local filesystem."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False


def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)


def _reset_fsspec_lock() -> None:
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
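
# Hedged usage sketch (not part of the original module): extract_path_from_uri
# strips the protocol prefix from a remote URI and leaves plain local paths alone.
if __name__ == "__main__":
    assert extract_path_from_uri("s3://my-bucket/datasets/squad") == "my-bucket/datasets/squad"
    assert extract_path_from_uri("/local/path/squad") == "/local/path/squad"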
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    """Compute pi to `precision` significant digits using the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)  # each series term adds roughly 14 digits
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi is: {pi(n)}")
import argparse
import collections
import os
import re
import tempfile

import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder

from transformers.utils import direct_transformers_import


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
TRANSFORMERS_PATH = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

# Regexes that match TF/Flax/PT model names.
_re_tf_models = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_re_flax_models = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
_re_pt_models = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")

# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
PIPELINE_TAGS_AND_AUTO_MODELS = [
    ("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
    ("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
    ("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
    ("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
    ("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
    ("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
    ("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
    ("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
    ("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
    (
        "zero-shot-object-detection",
        "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
        "AutoModelForZeroShotObjectDetection",
    ),
    ("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
    ("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
    ("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
    ("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
    (
        "table-question-answering",
        "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForTableQuestionAnswering",
    ),
    ("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
    ("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
    (
        "next-sentence-prediction",
        "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
        "AutoModelForNextSentencePrediction",
    ),
    (
        "audio-frame-classification",
        "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForAudioFrameClassification",
    ),
    ("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
    (
        "document-question-answering",
        "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForDocumentQuestionAnswering",
    ),
    (
        "visual-question-answering",
        "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
        "AutoModelForVisualQuestionAnswering",
    ),
    ("image-to-text", "MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
    (
        "zero-shot-image-classification",
        "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
        "AutoModelForZeroShotImageClassification",
    ),
    ("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
    ("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
    ("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def camel_case_split(identifier: str) -> list:
    """Split a camel-cased `identifier` into its words."""
    matches = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)", identifier)
    return [m.group(0) for m in matches]


def get_frameworks_table() -> pd.DataFrame:
    config_mapping_names = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
    model_prefix_to_model_type = {
        config.replace("Config", ""): model_type for model_type, config in config_mapping_names.items()
    }

    # Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
    pt_models = collections.defaultdict(bool)
    tf_models = collections.defaultdict(bool)
    flax_models = collections.defaultdict(bool)

    # Let's lookup through all transformers objects (once) and find if models are supported by a given backend.
    for attr_name in dir(transformers_module):
        lookup_dict = None
        if _re_tf_models.match(attr_name) is not None:
            lookup_dict = tf_models
            attr_name = _re_tf_models.match(attr_name).groups()[0]
        elif _re_flax_models.match(attr_name) is not None:
            lookup_dict = flax_models
            attr_name = _re_flax_models.match(attr_name).groups()[0]
        elif _re_pt_models.match(attr_name) is not None:
            lookup_dict = pt_models
            attr_name = _re_pt_models.match(attr_name).groups()[0]

        if lookup_dict is not None:
            while len(attr_name) > 0:
                if attr_name in model_prefix_to_model_type:
                    lookup_dict[model_prefix_to_model_type[attr_name]] = True
                    break
                # Try again after removing the last word in the name
                attr_name = "".join(camel_case_split(attr_name)[:-1])

    all_models = set(list(pt_models.keys()) + list(tf_models.keys()) + list(flax_models.keys()))
    all_models = list(all_models)
    all_models.sort()

    data = {"model_type": all_models}
    data["pytorch"] = [pt_models[t] for t in all_models]
    data["tensorflow"] = [tf_models[t] for t in all_models]
    data["flax"] = [flax_models[t] for t in all_models]

    # Now let's use the auto-mapping names to make sure
    processors = {}
    for t in all_models:
        if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
            processors[t] = "AutoProcessor"
        elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
            processors[t] = "AutoTokenizer"
        elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
            processors[t] = "AutoFeatureExtractor"
        else:
            # Default to AutoTokenizer if a model has nothing, for backward compatibility.
            processors[t] = "AutoTokenizer"

    data["processor"] = [processors[t] for t in all_models]

    return pd.DataFrame(data)


def update_pipeline_and_auto_class_table(table: dict) -> dict:
    auto_modules = [
        transformers_module.models.auto.modeling_auto,
        transformers_module.models.auto.modeling_tf_auto,
        transformers_module.models.auto.modeling_flax_auto,
    ]
    for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
        model_mappings = [model_mapping, f"TF_{model_mapping}", f"FLAX_{model_mapping}"]
        auto_classes = [auto_class, f"TF_{auto_class}", f"Flax_{auto_class}"]
        # Loop through all three frameworks
        for module, cls, mapping in zip(auto_modules, auto_classes, model_mappings):
            # The type of pipeline may not exist in this framework
            if not hasattr(module, mapping):
                continue
            # First extract all model_names
            model_names = []
            for name in getattr(module, mapping).values():
                if isinstance(name, str):
                    model_names.append(name)
                else:
                    model_names.extend(list(name))

            # Add pipeline tag and auto model class for those models
            table.update({model_name: (pipeline_tag, cls) for model_name in model_names})

    return table


def update_metadata(token: str, commit_sha: str):
    frameworks_table = get_frameworks_table()
    frameworks_dataset = Dataset.from_pandas(frameworks_table)

    resolved_tags_file = hf_hub_download(
        "huggingface/transformers-metadata", "pipeline_tags.json", repo_type="dataset", token=token
    )
    tags_dataset = Dataset.from_json(resolved_tags_file)
    table = {
        tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
        for i in range(len(tags_dataset))
    }
    table = update_pipeline_and_auto_class_table(table)

    # Sort the model classes to avoid some nondeterministic updates to create false update commits.
    model_classes = sorted(table.keys())
    tags_table = pd.DataFrame(
        {
            "model_class": model_classes,
            "pipeline_tag": [table[m][0] for m in model_classes],
            "auto_class": [table[m][1] for m in model_classes],
        }
    )
    tags_dataset = Dataset.from_pandas(tags_table)

    with tempfile.TemporaryDirectory() as tmp_dir:
        frameworks_dataset.to_json(os.path.join(tmp_dir, "frameworks.json"))
        tags_dataset.to_json(os.path.join(tmp_dir, "pipeline_tags.json"))

        if commit_sha is not None:
            commit_message = (
                f"Update with commit {commit_sha}\n\nSee: "
                f"https://github.com/huggingface/transformers/commit/{commit_sha}"
            )
        else:
            commit_message = "Update"

        upload_folder(
            repo_id="huggingface/transformers-metadata",
            folder_path=tmp_dir,
            repo_type="dataset",
            token=token,
            commit_message=commit_message,
        )


def check_pipeline_tags():
    in_table = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
    pipeline_tasks = transformers_module.pipelines.SUPPORTED_TASKS
    missing = []
    for key in pipeline_tasks:
        if key not in in_table:
            model = pipeline_tasks[key]["pt"]
            if isinstance(model, (list, tuple)):
                model = model[0]
            model = model.__name__
            if model not in in_table.values():
                missing.append(key)

    if len(missing) > 0:
        msg = ", ".join(missing)
        raise ValueError(
            "The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
            f"`utils/update_metadata.py`: {msg}. Please add them!"
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
    parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
    parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
    args = parser.parse_args()

    if args.check_only:
        check_pipeline_tags()
    else:
        update_metadata(args.token, args.commit_sha)
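
# Example invocations (illustrative, based on the argparse options defined above):
#   python utils/update_metadata.py --token <hf_token> --commit_sha <sha>
#   python utils/update_metadata.py --check-only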
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
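
# Hedged usage sketch (checkpoint name is illustrative, not taken from this file):
#   from transformers import BridgeTowerProcessor
#   from PIL import Image
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(Image.new("RGB", (224, 224)), "an image of a cat", return_tensors="pt")
#   # encoding holds input_ids / attention_mask plus pixel_values and pixel_mask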
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
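
# Hedged usage sketch (not part of the original module): inspecting the dynamic
# axes the ONNX export will use.
#   onnx_config = CamembertOnnxConfig(CamembertConfig(), task="default")
#   print(onnx_config.inputs)  # OrderedDict of input_ids / attention_mask axes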
from ..utils import DummyObject, requires_backends


# Note: the original class name was lost to obfuscation; in the released
# `transformers` dummy file the keras_nlp-backed object is `TFGPT2Tokenizer`.
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
import argparse
import os
from pathlib import Path
from typing import Dict

import tensorflow as tf
import torch
from tqdm import tqdm

from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params


PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ["memory_attention", "encoder_attn"],
    ["attention", "attn"],
    ["/", "."],
    [".LayerNorm.gamma", "_layer_norm.weight"],
    [".LayerNorm.beta", "_layer_norm.bias"],
    ["r.layer_", "r.layers."],
    ["output_proj", "out_proj"],
    ["ffn.dense_1.", "fc2."],
    ["ffn.dense.", "fc1."],
    ["ffn_layer_norm", "final_layer_norm"],
    ["kernel", "weight"],
    ["encoder_layer_norm.", "encoder.layer_norm."],
    ["decoder_layer_norm.", "decoder.layer_norm."],
    ["embeddings.weights", "shared.weight"],
]


def rename_state_dict_key(k: str) -> str:
    for pegasus_name, hf_name in PATTERNS:
        k = k.replace(pegasus_name, hf_name)
    return k


def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(f"could not find new key {new_k} in state dict. (converted from {k})")

        if "dense" in k or "proj" in new_k:
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, f"{new_k}, {k}, {v.shape}, {sd[new_k].shape}"
    # make sure embedding.padding_idx is respected
    mapping["shared.weight"][cfg.pad_token_id] = torch.zeros_like(mapping["shared.weight"][cfg.pad_token_id + 1])
    mapping["encoder.embed_tokens.weight"] = mapping["shared.weight"]
    mapping["decoder.embed_tokens.weight"] = mapping["shared.weight"]
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith("bias") and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ["encoder.embed_positions.weight", "decoder.embed_positions.weight"]
    ]
    assert unexpected_missing == [], f"no matches found for the following torch keys {unexpected_missing}"
    assert extra == [], f"no matches found for the following tf keys {extra}"
    return torch_model


def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ["Adafactor", "global_step"]
    for name, shape in tqdm(init_vars, desc="converting tf checkpoint to dict"):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights


def convert_pegasus_ckpt_to_pytorch(ckpt_path, save_dir):
    # save tokenizer first
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[f"summarization_{dataset}"]["max_position_embeddings"]
    tok = PegasusTokenizer.from_pretrained("sshleifer/pegasus", model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[f"summarization_{dataset}"]
    if dataset == "large":
        cfg_updates["task_specific_params"] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    sd.pop("model.decoder.embed_positions.weight")
    sd.pop("model.encoder.embed_positions.weight")
    torch.save(sd, Path(save_dir) / "pytorch_model.bin")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
    parser.add_argument("save_dir", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    if args.save_dir is None:
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join("pegasus", dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
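
# Example invocation (paths are illustrative; save_dir defaults to pegasus/<dataset>):
#   python convert_pegasus_tf_to_pytorch.py ./ckpt/aeslc/model.ckpt-32000 pegasus/aeslc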
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }


def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule


class AdamWeightDecay(Adam):
    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True


class GradientAccumulator:
    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
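
# Hedged usage sketch (not part of the original module): 100 linear warmup steps
# followed by polynomial decay over the remaining 900 steps.
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100)
    print(float(lr_schedule(50)), float(lr_schedule(500)))  # mid-warmup vs mid-decay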
"""simple docstring"""
from functools import lru_cache
@lru_cache
def _lowerCamelCase ( __a ):
if num < 0:
raise ValueError('''Number should not be negative.''' )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case ( __lowercase , unittest.TestCase ):
UpperCAmelCase__ = KandinskyVaaControlnetPipeline
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
UpperCAmelCase__ = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
UpperCAmelCase__ = False
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return 32
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim
@property
def _lowercase (self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowercase (self ):
"""simple docstring"""
return 1_00
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = {
'''in_channels''': 8,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE_ = UNetaDConditionModel(**SCREAMING_SNAKE_CASE_ )
return model
@property
def _lowercase (self ):
"""simple docstring"""
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _lowercase (self ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE_ = VQModel(**self.dummy_movq_kwargs )
return model
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.dummy_unet
SCREAMING_SNAKE_CASE_ = self.dummy_movq
SCREAMING_SNAKE_CASE_ = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='''linear''' , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=SCREAMING_SNAKE_CASE_ , set_alpha_to_one=SCREAMING_SNAKE_CASE_ , steps_offset=1 , prediction_type='''epsilon''' , thresholding=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def _lowercase (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
SCREAMING_SNAKE_CASE_ )
# create hint
SCREAMING_SNAKE_CASE_ = floats_tensor((1, 3, 64, 64) , rng=random.Random(SCREAMING_SNAKE_CASE_ ) ).to(SCREAMING_SNAKE_CASE_ )
if str(SCREAMING_SNAKE_CASE_ ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE_ = torch.manual_seed(SCREAMING_SNAKE_CASE_ )
else:
SCREAMING_SNAKE_CASE_ = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''hint''': hint,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = '''cpu'''
SCREAMING_SNAKE_CASE_ = self.get_dummy_components()
SCREAMING_SNAKE_CASE_ = self.pipeline_class(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe.to(SCREAMING_SNAKE_CASE_ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) )
SCREAMING_SNAKE_CASE_ = output.images
SCREAMING_SNAKE_CASE_ = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ , )[0]
SCREAMING_SNAKE_CASE_ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE_ = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE_ = np.array(
[0.6_95_98_26, 0.86_82_79, 0.7_55_80_92, 0.68_76_94_67, 0.85_80_58_04, 0.65_97_74_96, 0.44_88_53_02, 0.5_95_91_11, 0.4_25_15_95] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def _lowercase (self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase (self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy''' )
SCREAMING_SNAKE_CASE_ = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/hint_image_cat.png''' )
SCREAMING_SNAKE_CASE_ = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE_ ) ).float() / 2_55.0
SCREAMING_SNAKE_CASE_ = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
SCREAMING_SNAKE_CASE_ = KandinskyVaaPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = KandinskyVaaControlnetPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE_ = pipeline.to(SCREAMING_SNAKE_CASE_ )
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ = '''A robot, 4k photo'''
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = pipe_prior(
SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE_ = torch.Generator(device='''cuda''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE_ = pipeline(
image_embeds=SCREAMING_SNAKE_CASE_ , negative_image_embeds=SCREAMING_SNAKE_CASE_ , hint=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , num_inference_steps=1_00 , output_type='''np''' , )
SCREAMING_SNAKE_CASE_ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
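
# The integration test above is gated by @slow and @require_torch_gpu: it only
# runs when the RUN_SLOW=1 environment variable is set and a CUDA device is
# available, e.g. (test path illustrative):
#   RUN_SLOW=1 pytest tests/pipelines/kandinsky_v22 -k controlnet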