| code (stringlengths 81-54k) | code_codestyle (int64 0-721) | style_context (stringlengths 91-41.9k) | style_context_codestyle (int64 0-699) | label (int64 0-1) |
|---|---|---|---|---|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_trajectory_transformer': [
'TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TrajectoryTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrajectoryTransformerModel',
'TrajectoryTransformerPreTrainedModel',
'load_tf_weights_in_trajectory_transformer',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
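As context for the pattern above, here is a minimal sketch of what a lazy module does for its callers: a submodule is imported only when one of its symbols is first accessed. `DemoLazyModule` below is a simplified stand-in written for illustration; it is not the actual `transformers.utils._LazyModule` implementation.

```python
import importlib
import types


class DemoLazyModule(types.ModuleType):
    """Simplified stand-in for transformers' _LazyModule (illustration only)."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._object_to_module = {
            obj: sub for sub, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        if attr not in self._object_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is only imported when one of its symbols is first accessed
        module = importlib.import_module(f"{self.__name__}.{self._object_to_module[attr]}")
        return getattr(module, attr)
```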
| code_codestyle: 715 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # strip trailing zero coefficients from the dual part
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
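A quick independent check of the dual-number machinery (my own example, not part of the original file): `f(y) = y**2 * y**4 = y**6`, so `f''(9) = 30 * 9**4 = 196830`, which is the value the `print` above produces.

```python
# Sanity checks for the Dual/differentiate implementation above.
assert differentiate(f, 9, 2) == 30 * 9**4  # f(y) = y**6, so f''(9) = 30 * 9**4
assert differentiate(lambda x: x**3, 2, 1) == 12  # d/dx x**3 = 3x**2 = 12 at x = 2
```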
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
import unittest
from transformers import DebertaV2Config, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
        DebertaV2ForMaskedLM,
        DebertaV2ForMultipleChoice,
        DebertaV2ForQuestionAnswering,
        DebertaV2ForSequenceClassification,
        DebertaV2ForTokenClassification,
        DebertaV2Model,
    )
    from transformers.models.deberta_v2.modeling_deberta_v2 import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaV2ModelTester(object):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaV2Config(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type)

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2Model(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaV2ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaV2ForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaV2Model,
            DebertaV2ForMaskedLM,
            DebertaV2ForSequenceClassification,
            DebertaV2ForTokenClassification,
            DebertaV2ForQuestionAnswering,
            DebertaV2ForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaV2Model,
            "fill-mask": DebertaV2ForMaskedLM,
            "question-answering": DebertaV2ForQuestionAnswering,
            "text-classification": DebertaV2ForSequenceClassification,
            "token-classification": DebertaV2ForTokenClassification,
            "zero-shot": DebertaV2ForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaV2Config, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaV2ModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaV2Model.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
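For reference, the `ids_tensor` helper imported above behaves roughly like this sketch (a paraphrase of the common test utility, not the exact implementation):

```python
import torch

def ids_tensor_sketch(shape, vocab_size):
    # uniform random token ids in [0, vocab_size), shaped like the requested dims
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)
```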
| code_codestyle: 716 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
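For illustration, a guess at the shape of what `pytest_addoption_shared` registers behind the hook above. The option name comes from this file, but the body below is a hypothetical sketch, not the real implementation:

```python
def pytest_addoption(parser):
    # hypothetical sketch: register the --make-reports option that
    # pytest_terminal_summary reads back via terminalreporter.config.getoption(...)
    parser.addoption("--make-reports", action="store", default=None,
                     help="generate report files for this test run")
```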
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
PRETRAINED_LYRIC_TOKENS_SIZES = {
'jukebox': 512,
}
class JukeboxTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_LYRIC_TOKENS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, artists_file, genres_file, lyrics_file, version=["v3", "v2", "v2"], max_n_lyric_tokens=512, n_genres=5, unk_token="<|endoftext|>", **kwargs):
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        super().__init__(
            unk_token=unk_token, n_genres=n_genres, version=version, max_n_lyric_tokens=max_n_lyric_tokens, **kwargs)
        self.version = version
        self.max_n_lyric_tokens = max_n_lyric_tokens
        self.n_genres = n_genres

        with open(artists_file, encoding="utf-8") as vocab_handle:
            self.artists_encoder = json.load(vocab_handle)

        with open(genres_file, encoding="utf-8") as vocab_handle:
            self.genres_encoder = json.load(vocab_handle)

        with open(lyrics_file, encoding="utf-8") as vocab_handle:
            self.lyrics_encoder = json.load(vocab_handle)

        oov = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
        if len(self.lyrics_encoder) == 79:
            oov = oov.replace(r"\-'", r"\-+'")

        self.out_of_vocab = regex.compile(oov)
        self.artists_decoder = {v: k for k, v in self.artists_encoder.items()}
        self.genres_decoder = {v: k for k, v in self.genres_encoder.items()}
        self.lyrics_decoder = {v: k for k, v in self.lyrics_encoder.items()}
    @property
    def vocab_size(self):
        return len(self.artists_encoder) + len(self.genres_encoder) + len(self.lyrics_encoder)

    def get_vocab(self):
        return {**self.artists_encoder, **self.genres_encoder, **self.lyrics_encoder}
    def _convert_token_to_id(self, list_artists, list_genres, list_lyrics):
        artists_id = [self.artists_encoder.get(artist, 0) for artist in list_artists]
        for genres in range(len(list_genres)):
            list_genres[genres] = [self.genres_encoder.get(genre, 0) for genre in list_genres[genres]]
            list_genres[genres] = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres]))

        lyric_ids = [[self.lyrics_encoder.get(character, 0) for character in list_lyrics[0]], [], []]
        return artists_id, list_genres, lyric_ids

    def _tokenize(self, lyrics):
        # the lyric vocabulary is character-level: just split into characters
        return list(lyrics)

    def tokenize(self, artist, genre, lyrics, **kwargs):
        artist, genre, lyrics = self.prepare_for_tokenization(artist, genre, lyrics)
        lyrics = self._tokenize(lyrics)
        return artist, genre, lyrics
    def prepare_for_tokenization(self, artists, genres, lyrics, is_split_into_words=False):
        for idx in range(len(self.version)):
            if self.version[idx] == "v3":
                artists[idx] = artists[idx].lower()
                genres[idx] = [genres[idx].lower()]
            else:
                artists[idx] = self._normalize(artists[idx]) + ".v2"
                genres[idx] = [
                    self._normalize(genre) + ".v2" for genre in genres[idx].split("_")
                ]  # split is for the full dictionary with combined genres

        if self.version[0] == "v2":
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+")
            vocab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
            self.vocab = {vocab[index]: index + 1 for index in range(len(vocab))}
            self.vocab["<unk>"] = 0
            self.n_vocab = len(vocab) + 1
            self.lyrics_encoder = self.vocab
            self.lyrics_decoder = {v: k for k, v in self.vocab.items()}
            self.lyrics_decoder[0] = ""
        else:
            self.out_of_vocab = regex.compile(r"[^A-Za-z0-9.,:;!?\-+'\"()\[\] \t\n]+")

        lyrics = self._run_strip_accents(lyrics)
        lyrics = lyrics.replace("\\", "\n")
        lyrics = self.out_of_vocab.sub("", lyrics), [], []
        return artists, genres, lyrics
    def _run_strip_accents(self, text):
        # strip combining marks after NFD normalization
        text = unicodedata.normalize("NFD", text)
        output = []
        for char in text:
            cat = unicodedata.category(char)
            if cat == "Mn":
                continue
            output.append(char)
        return "".join(output)

    def _normalize(self, text):
        # lowercase, keep [a-zA-Z0-9.], collapse everything else into single underscores
        accepted = (
            [chr(i) for i in range(ord("a"), ord("z") + 1)]
            + [chr(i) for i in range(ord("A"), ord("Z") + 1)]
            + [chr(i) for i in range(ord("0"), ord("9") + 1)]
            + ["."]
        )
        accepted = frozenset(accepted)
        pattern = re.compile(r"_+")
        text = "".join([c if c in accepted else "_" for c in text.lower()])
        text = pattern.sub("_", text).strip("_")
        return text
    def convert_lyric_tokens_to_string(self, lyrics):
        return " ".join(lyrics)
    def convert_to_tensors(self, inputs, tensor_type=None, prepend_batch_axis=False):
        if not isinstance(tensor_type, TensorType):
            tensor_type = TensorType(tensor_type)

        # Get a function reference for the correct framework
        if tensor_type == TensorType.TENSORFLOW:
            if not is_tf_available():
                raise ImportError(
                    "Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.")
            import tensorflow as tf

            as_tensor = tf.constant
            is_tensor = tf.is_tensor
        elif tensor_type == TensorType.PYTORCH:
            if not is_torch_available():
                raise ImportError("Unable to convert output to PyTorch tensors format, PyTorch is not installed.")
            import torch

            as_tensor = torch.tensor
            is_tensor = torch.is_tensor
        elif tensor_type == TensorType.JAX:
            if not is_flax_available():
                raise ImportError("Unable to convert output to JAX tensors format, JAX is not installed.")
            import jax.numpy as jnp  # noqa: F811

            as_tensor = jnp.array
            is_tensor = _is_jax
        else:
            as_tensor = np.asarray
            is_tensor = _is_numpy

        # Do the tensor conversion in batch
        try:
            if prepend_batch_axis:
                inputs = [inputs]

            if not is_tensor(inputs):
                inputs = as_tensor(inputs)
        except:  # noqa E722
            raise ValueError(
                "Unable to create tensor, you should probably activate truncation and/or padding "
                "with 'padding=True' 'truncation=True' to have batched tensors with the same length.")

        return inputs
    def __call__(self, artist, genres, lyrics="", return_tensors="pt"):
        input_ids = [0, 0, 0]
        artist = [artist] * len(self.version)
        genres = [genres] * len(self.version)

        artists_tokens, genres_tokens, lyrics_tokens = self.tokenize(artist, genres, lyrics)
        artists_id, genres_ids, full_tokens = self._convert_token_to_id(artists_tokens, genres_tokens, lyrics_tokens)

        attention_masks = [-INFINITY] * len(full_tokens[-1])
        input_ids = [
            self.convert_to_tensors(
                [input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]], tensor_type=return_tensors)
            for i in range(len(self.version))
        ]
        return BatchEncoding({"input_ids": input_ids, "attention_masks": attention_masks})
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        artists_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["artists_file"])
        with open(artists_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.artists_encoder, ensure_ascii=False))

        genres_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["genres_file"])
        with open(genres_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.genres_encoder, ensure_ascii=False))

        lyrics_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["lyrics_file"])
        with open(lyrics_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.lyrics_encoder, ensure_ascii=False))

        return (artists_file, genres_file, lyrics_file)
    def _convert_id_to_token(self, artists_index, genres_index, lyric_index):
        artist = self.artists_decoder.get(artists_index)
        genres = [self.genres_decoder.get(genre) for genre in genres_index]
        lyrics = [self.lyrics_decoder.get(character) for character in lyric_index]
        return artist, genres, lyrics
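A hedged usage sketch of the tokenizer above. The checkpoint name mirrors `PRETRAINED_VOCAB_FILES_MAP`, and the artist/genre/lyrics strings are arbitrary examples; exact output shapes are not guaranteed by this sketch:

```python
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("ArthurZ/jukebox")
# __call__ builds one conditioning tensor per entry in tokenizer.version ("v3", "v2", "v2")
encoded = tokenizer(artist="Alan Jackson", genres="Country Rock", lyrics="old town road")
print(len(encoded["input_ids"]))  # expected: 3, one per version
```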
| code_codestyle: 717 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)

        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
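For reference, the `random_attention_mask` helper imported above behaves roughly like this sketch (my paraphrase of the test utility, not the exact source):

```python
import numpy as np

def random_attention_mask_sketch(shape):
    # random 0/1 mask, with the last token always attended to so no row is all zeros
    mask = np.random.randint(0, 2, size=shape)
    mask[:, -1] = 1
    return mask
```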
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 718 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| code_codestyle: 719 |
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
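The same idea with inline data instead of `num.txt` (two sample 50-digit numbers standing in for the real file contents, for illustration only):

```python
nums = [
    "37107287533902102798797998220837590246510135740250",
    "46376937677490009712648124896970078050417018260538",
]
print(str(sum(int(n) for n in nums))[:10])  # first ten digits of the sum
```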
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/config.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/config.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json"""
),
"""distilbert-base-uncased-finetuned-sst-2-english""": (
"""https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json"""
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
"""hidden_size""": """dim""",
"""num_attention_heads""": """n_heads""",
"""num_hidden_layers""": """n_layers""",
}
    def __init__(self, vocab_size=30522, max_position_embeddings=512, sinusoidal_pos_embds=False, n_layers=6, n_heads=12, dim=768, hidden_dim=4 * 768, dropout=0.1, attention_dropout=0.1, activation="gelu", initializer_range=0.02, qa_dropout=0.1, seq_classif_dropout=0.2, pad_token_id=0, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)
class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
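A small usage sketch of the `attribute_map` mechanism above: `PretrainedConfig` resolves the canonical attribute names through the map, so the DistilBERT-specific names stay the source of truth. The assertions reflect my understanding of `attribute_map`; they are not part of this file.

```python
from transformers import DistilBertConfig

config = DistilBertConfig(n_layers=6, n_heads=12, dim=768)
# canonical names are routed to DistilBERT's own attribute names:
assert config.num_hidden_layers == config.n_layers == 6
assert config.num_attention_heads == config.n_heads == 12
assert config.hidden_size == config.dim == 768
```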
| code_codestyle: 720 |
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize each HumanEval prompt once and yield it n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Stops generation once every sequence in the batch contains an end-of-function string."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation.")
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs)

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
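To make the `remove_last_block` helper above concrete, a small self-contained demo (the example completion string is my own):

```python
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string):
    # identical logic to the helper above: split on the EOF markers and drop
    # the last (incomplete) block along with the marker that opened it
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

completion = "    return x + 1\ndef next_function():\n    pass"
print(repr(remove_last_block(completion)))  # '    return x + 1'
```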
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
import os
from typing import List, Optional, Union
from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
from ..auto import AutoTokenizer
class InstructBlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer, qformer_tokenizer):
        super().__init__(image_processor, tokenizer)

        # add QFormer tokenizer
        self.qformer_tokenizer = qformer_tokenizer
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchFeature:
        if images is None and text is None:
            raise ValueError("You have to specify at least images or text.")

        encoding = BatchFeature()

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding.update(text_encoding)
            qformer_text_encoding = self.qformer_tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            encoding["qformer_input_ids"] = qformer_text_encoding.pop("input_ids")
            encoding["qformer_attention_mask"] = qformer_text_encoding.pop("attention_mask")

        if images is not None:
            image_encoding = self.image_processor(images, return_tensors=return_tensors)
            encoding.update(image_encoding)

        return encoding
def lowerCAmelCase__ ( self : Dict , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Dict:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
def lowerCAmelCase__ ( self : str , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCAmelCase__ , **UpperCAmelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : int , **lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
if os.path.isfile(UpperCAmelCase__ ):
raise ValueError(F"""Provided path ({save_directory}) should be a directory, not a file""" )
os.makedirs(UpperCAmelCase__ , exist_ok=UpperCAmelCase__ )
_UpperCAmelCase : Tuple = os.path.join(UpperCAmelCase__ , "qformer_tokenizer" )
self.qformer_tokenizer.save_pretrained(UpperCAmelCase__ )
return super().save_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(UpperCAmelCase__ , subfolder="qformer_tokenizer" )
_UpperCAmelCase : int = cls._get_arguments_from_pretrained(UpperCAmelCase__ , **UpperCAmelCase__ )
args.append(UpperCAmelCase__ )
return cls(*UpperCAmelCase__ )
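# For context, a hedged usage sketch of the processor defined above, assuming it
# mirrors the upstream InstructBLIP processor (this file's class names are
# obfuscated aliases); the checkpoint name and image path are illustrative.
def _instructblip_processor_example():
    from PIL import Image
    from transformers import InstructBlipProcessor

    processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
    image = Image.open("photo.jpg")
    inputs = processor(images=image, text="Describe the image.", return_tensors="pt")
    # In the upstream implementation, `inputs` carries pixel_values plus both the
    # LLM token ids and the Q-Former token ids produced in __call__ above.
    return inputs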
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
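# A hedged usage sketch for the exported ControlNet pipelines, following the
# standard diffusers API; the checkpoint names below are illustrative assumptions.
def _controlnet_pipeline_example(condition_image):
    # `condition_image` is a hypothetical conditioning input, e.g. a Canny edge map (PIL.Image).
    import torch
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
    )
    return pipe("a photo of a cat", image=condition_image).images[0]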
| 40
| 0
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __lowerCAmelCase ():
_UpperCAmelCase : int = ArgumentParser("Transformers CLI tool" , usage="transformers-cli <command> [<args>]" )
_UpperCAmelCase : Dict = parser.add_subparsers(help="transformers-cli command helpers" )
# Register commands
ConvertCommand.register_subcommand(_lowerCAmelCase )
DownloadCommand.register_subcommand(_lowerCAmelCase )
EnvironmentCommand.register_subcommand(_lowerCAmelCase )
RunCommand.register_subcommand(_lowerCAmelCase )
ServeCommand.register_subcommand(_lowerCAmelCase )
UserCommands.register_subcommand(_lowerCAmelCase )
AddNewModelCommand.register_subcommand(_lowerCAmelCase )
AddNewModelLikeCommand.register_subcommand(_lowerCAmelCase )
LfsCommands.register_subcommand(_lowerCAmelCase )
PTtoTFCommand.register_subcommand(_lowerCAmelCase )
# Let's go
_UpperCAmelCase : Any = parser.parse_args()
if not hasattr(_lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
_UpperCAmelCase : Tuple = args.func(_lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
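# For reference, a minimal sketch of the interface the registrations above rely
# on: each command subclasses BaseTransformersCLICommand with a static
# `register_subcommand` and an instance `run` method. `HelloCommand` is a
# hypothetical example, not part of the real CLI.
from transformers.commands import BaseTransformersCLICommand

class HelloCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        hello_parser = parser.add_parser("hello", help="Print a greeting.")
        hello_parser.add_argument("--name", type=str, default="world")
        # main() above calls args.func(args) and then .run() on the result.
        hello_parser.set_defaults(func=lambda args: HelloCommand(args.name))

    def __init__(self, name):
        self._name = name

    def run(self):
        print(f"Hello, {self._name}!")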
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
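# A quick illustration of the URL shape the wrapper above produces, calling
# huggingface_hub directly; the repo id and filename are illustrative.
def _hf_hub_url_example():
    import huggingface_hub as hfh

    # e.g. "https://huggingface.co/datasets/squad/resolve/main/dataset_infos.json"
    return hfh.hf_hub_url("squad", "dataset_infos.json", repo_type="dataset", revision="main")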
| 40
| 0
|
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
lowerCamelCase__ = logging.getLogger(__name__)
lowerCamelCase__ = 'pytorch_model.bin'
@dataclasses.dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Tuple = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
lowerCAmelCase : Any = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class lowerCAmelCase__ :
lowerCAmelCase : int = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
lowerCAmelCase : Tuple = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
lowerCAmelCase : List[str] = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "A csv or a json file containing the validation data."} )
lowerCAmelCase : Optional[int] = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "The name of the task to train on."} , )
lowerCAmelCase : Union[str, Any] = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class lowerCAmelCase__ :
lowerCAmelCase : Union[str, Any] = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
lowerCAmelCase : Dict = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
lowerCAmelCase : int = dataclasses.field(
default="no" , metadata={
"help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
} , )
lowerCAmelCase : Union[str, Any] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowerCAmelCase : List[Any] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
lowerCAmelCase : Optional[Any] = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
lowerCAmelCase : str = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
lowerCAmelCase : List[str] = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
lowerCAmelCase : int = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
lowerCAmelCase : List[str] = dataclasses.field(
default=100 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
lowerCAmelCase : Any = dataclasses.field(
default=lowerCamelCase__ , metadata={"help": "Random seed for initialization."} , )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
_UpperCAmelCase : Union[str, Any] = dataset.filter(lambda __lowerCAmelCase : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
_UpperCAmelCase : List[str] = int(eval_result * len(lowerCamelCase_ ) )
print(lowerCamelCase_ )
_UpperCAmelCase : Any = dataset.sort("probability" , reverse=lowerCamelCase_ )
_UpperCAmelCase : Any = dataset.select(range(lowerCamelCase_ ) )
_UpperCAmelCase : Tuple = dataset.remove_columns(["label", "probability"] )
_UpperCAmelCase : str = dataset.rename_column("prediction" , "label" )
_UpperCAmelCase : List[str] = dataset.map(lambda __lowerCAmelCase : {"label": idalabel[example["label"]]} )
_UpperCAmelCase : Dict = dataset.shuffle(seed=args.seed )
_UpperCAmelCase : Union[str, Any] = os.path.join(lowerCamelCase_ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(lowerCamelCase_ , index=lowerCamelCase_ )
else:
dataset.to_json(lowerCamelCase_ )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ):
_UpperCAmelCase : Any = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase : Tuple = STModelArguments(model_name_or_path=lowerCamelCase_ )
_UpperCAmelCase : Tuple = STDataArguments(train_file=lowerCamelCase_ , infer_file=lowerCamelCase_ )
_UpperCAmelCase : Any = STTrainingArguments(output_dir=lowerCamelCase_ )
_UpperCAmelCase : Optional[int] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(lowerCamelCase_ ).items():
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
for key, value in kwargs.items():
if hasattr(lowerCamelCase_ , lowerCamelCase_ ):
setattr(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# Sanity checks
_UpperCAmelCase : str = {}
_UpperCAmelCase : Optional[int] = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_UpperCAmelCase : List[Any] = args.train_file
_UpperCAmelCase : int = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_UpperCAmelCase : str = args.eval_file
for key in data_files:
_UpperCAmelCase : List[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
_UpperCAmelCase : Optional[int] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_UpperCAmelCase : str = F"""{args.output_dir}/self-train_iter-{{}}""".format
_UpperCAmelCase : Union[str, Any] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=lowerCamelCase_ )
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
accelerator.wait_for_everyone()
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : int = False
# Show the progress bar
_UpperCAmelCase : Dict = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_UpperCAmelCase : int = data_dir_format(lowerCamelCase_ )
assert os.path.exists(lowerCamelCase_ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_UpperCAmelCase : Optional[int] = os.path.join(lowerCamelCase_ , "stage-1" )
_UpperCAmelCase : List[Any] = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(lowerCamelCase_ , lowerCamelCase_ ):
arguments_dict.update({key: value} )
_UpperCAmelCase : Dict = os.path.join(lowerCamelCase_ , "best-checkpoint" , lowerCamelCase_ )
if os.path.exists(lowerCamelCase_ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , lowerCamelCase_ , lowerCamelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , lowerCamelCase_ )
finetune(**lowerCamelCase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCamelCase_ )
logger.info("Self-training job completed: iteration: %d, stage: 1." , lowerCamelCase_ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_UpperCAmelCase : str = os.path.join(lowerCamelCase_ , "best-checkpoint" )
_UpperCAmelCase : List[str] = os.path.join(lowerCamelCase_ , "stage-2" )
# Update arguments_dict
_UpperCAmelCase : Optional[Any] = model_path
_UpperCAmelCase : List[str] = data_files['''train''']
_UpperCAmelCase : Tuple = current_output_dir
_UpperCAmelCase : List[str] = os.path.join(lowerCamelCase_ , "best-checkpoint" , lowerCamelCase_ )
if os.path.exists(lowerCamelCase_ ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , lowerCamelCase_ , lowerCamelCase_ , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , lowerCamelCase_ )
finetune(**lowerCamelCase_ )
accelerator.wait_for_everyone()
assert os.path.exists(lowerCamelCase_ )
logger.info("Self-training job completed: iteration: %d, stage: 2." , lowerCamelCase_ )
_UpperCAmelCase : List[str] = iteration
_UpperCAmelCase : Dict = data_dir_format(iteration + 1 )
_UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(os.path.join(lowerCamelCase_ , "best-checkpoint" ) )
_UpperCAmelCase : str = config.idalabel
_UpperCAmelCase : int = os.path.join(lowerCamelCase_ , "eval_results_best-checkpoint.json" )
_UpperCAmelCase : Any = os.path.join(lowerCamelCase_ , "test_results_best-checkpoint.json" )
assert os.path.exists(lowerCamelCase_ )
with open(lowerCamelCase_ , "r" ) as f:
_UpperCAmelCase : List[str] = float(json.load(lowerCamelCase_ )[args.eval_metric] )
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase_ , "infer_output_best-checkpoint.csv" )
assert os.path.exists(lowerCamelCase_ )
# Loading the dataset from local csv or json files.
_UpperCAmelCase : Optional[int] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )['''data''']
_UpperCAmelCase : Tuple = load_dataset("csv" , data_files={"data": infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_ )
shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(lowerCamelCase_ ):
shutil.copy(lowerCamelCase_ , os.path.join(lowerCamelCase_ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
accelerator.wait_for_everyone()
_UpperCAmelCase : Union[str, Any] = os.path.join(lowerCamelCase_ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_UpperCAmelCase : Optional[Any] = eval_result
if best_iteration is None:
_UpperCAmelCase : Dict = new_iteration
_UpperCAmelCase : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_UpperCAmelCase : List[str] = new_iteration
_UpperCAmelCase : Union[str, Any] = new_eval_result
_UpperCAmelCase : Optional[int] = 0
else:
if new_eval_result == best_eval_result:
_UpperCAmelCase : Dict = new_iteration
_UpperCAmelCase : Any = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_UpperCAmelCase : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , lowerCamelCase_ )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCamelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCamelCase_ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(lowerCamelCase_ , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , lowerCamelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(lowerCamelCase_ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(lowerCamelCase_ , "eval_results_best-iteration.json" ) , )
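# A hedged sketch of how the self-training entry point above is typically
# invoked (the upstream example exposes it as `selftrain`; all paths and
# hyperparameters below are illustrative assumptions, and `selftrain` is not
# defined under that name in this obfuscated module):
def _selftrain_example():
    selftrain(
        "bert-base-uncased",        # model_name_or_path
        "data/train.csv",           # train_file
        "data/infer.csv",           # infer_file
        "outputs/self-training",    # output_dir
        eval_file="data/eval.csv",
        evaluation_strategy="steps",
        max_selftrain_iterations=10,
        do_filter_by_confidence=True,
        confidence_threshold=0.75,
    )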
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
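# A hedged usage sketch for the timm backbone wrapper above, assuming it maps
# to the upstream `TimmBackbone`; the backbone name and out_indices are
# illustrative.
def _timm_backbone_example():
    import torch
    from transformers import TimmBackbone

    backbone = TimmBackbone.from_pretrained("resnet18", use_timm_backbone=True, out_indices=(1, 2, 3, 4))
    pixel_values = torch.randn(1, 3, 224, 224)
    outputs = backbone(pixel_values)
    # One feature map per requested stage, as assembled in the forward pass above.
    return [feature_map.shape for feature_map in outputs.feature_maps]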
| 40
| 0
|
class lowerCAmelCase__ :
def __init__( self : str , lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = val
_UpperCAmelCase : Dict = None
_UpperCAmelCase : List[str] = None
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] ) ->Dict:
'''simple docstring'''
if self.val is not None:
if val < self.val:
if self.left is None:
_UpperCAmelCase : Any = Node(_lowercase )
else:
self.left.insert(_lowercase )
elif val > self.val:
if self.right is None:
_UpperCAmelCase : str = Node(_lowercase )
else:
self.right.insert(_lowercase )
else:
_UpperCAmelCase : Optional[int] = val
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if root:
inorder(root.left , UpperCamelCase__ )
res.append(root.val )
inorder(root.right , UpperCamelCase__ )
def __lowerCAmelCase (__lowerCAmelCase ):
if len(UpperCamelCase__ ) == 0:
return arr
_UpperCAmelCase : str = Node(arr[0] )
for i in range(1 , len(UpperCamelCase__ ) ):
root.insert(arr[i] )
# Traverse BST in order.
_UpperCAmelCase : Optional[int] = []
inorder(UpperCamelCase__ , UpperCamelCase__ )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
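# Tree sort is O(n log n) on average (each insert walks one root-to-leaf path)
# but degrades to O(n^2) on already-sorted input, where the BST collapses into
# a linked list. Note that `insert` above overwrites equal keys, so duplicates
# are dropped; a quick property check against the built-in sort:
def _tree_sort_check():
    import random

    data = [random.randint(1, 100) for _ in range(50)]
    assert tree_sort(data) == sorted(set(data))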
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
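# The lazy-import pattern above defers importing heavy submodules until an
# attribute is first accessed. A stripped-down illustration of the idea,
# independent of the actual `_LazyModule` implementation:
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first access to one of its names.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")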
| 40
| 0
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch "
"helper utility that will spawn up "
"multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=__SCREAMING_SNAKE_CASE , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=__SCREAMING_SNAKE_CASE , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=__SCREAMING_SNAKE_CASE )
return parser.parse_args()
def __lowerCAmelCase ():
_UpperCAmelCase : Any = parse_args()
# Import training_script as a module.
_UpperCAmelCase : Optional[int] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase : Tuple = script_fpath.stem
_UpperCAmelCase : Union[str, Any] = importlib.import_module(__SCREAMING_SNAKE_CASE )
# Patch sys.argv
_UpperCAmelCase : int = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
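# The launcher above requires the target training script to expose an
# `_mp_fn(index)` entry point for xmp.spawn; a typical invocation looks like:
#
#   python xla_spawn.py --num_cores 8 my_training_script.py --arg value
#
# A minimal compatible entry point in the training script (illustrative):
def _mp_fn(index):
    # `index` is the local ordinal of the TPU core running this process,
    # passed in by xmp.spawn.
    print(f"TPU process {index} started")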
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
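# For context, a hedged end-to-end inference sketch mirroring what the
# integration tests above exercise; the upstream class names are assumed here
# since this file uses obfuscated aliases, and the post-processing step is
# illustrative.
def _maskaformer_inference_example():
    import torch
    from PIL import Image
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    checkpoint = "facebook/mask2former-swin-small-coco-instance"
    processor = Mask2FormerImageProcessor.from_pretrained(checkpoint)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(checkpoint).eval()
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Resolve query logits into per-instance masks at the original resolution
    # (target_sizes expects (height, width), while PIL reports (width, height)).
    return processor.post_process_instance_segmentation(outputs, target_sizes=[image.size[::-1]])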
| 40
| 0
|
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = "https://openaipublic.azureedge.net/jukebox/models/"
lowerCamelCase__ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def __lowerCAmelCase (__lowerCAmelCase ) -> Union[str, Any]:
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_UpperCAmelCase : List[Any] = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_UpperCAmelCase : List[str] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_UpperCAmelCase : List[Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_UpperCAmelCase : Dict = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_UpperCAmelCase : Any = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_UpperCAmelCase : str = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_UpperCAmelCase : List[str] = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_UpperCAmelCase : List[Any] = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Optional[Any]:
_UpperCAmelCase : Optional[Any] = {}
import re
_UpperCAmelCase : Union[str, Any] = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_UpperCAmelCase : Tuple = re.compile(
R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_UpperCAmelCase : Any = re.compile(R"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_UpperCAmelCase : Dict = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_UpperCAmelCase : str = re.compile(
R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_UpperCAmelCase : Tuple = re.compile(R"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_UpperCAmelCase : List[Any] = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_UpperCAmelCase : str = re.compile(
R"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_UpperCAmelCase : List[str] = re.compile(R"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re_encoder_block_conv_in.match(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = regex_match.groups()
_UpperCAmelCase : str = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase : Optional[Any] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
_UpperCAmelCase : List[Any] = re_encoder_block_conv_in.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_encoder_block_resnet.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Dict = re_encoder_block_resnet.match(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = regex_match.groups()
_UpperCAmelCase : str = int(groups[2] ) * 2 + int(groups[3] )
_UpperCAmelCase : List[Any] = {"1": 1, "3": 2}[groups[-2]]
_UpperCAmelCase : List[str] = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
_UpperCAmelCase : Any = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_UpperCAmelCase : List[Any] = prefix + resnet_block
_UpperCAmelCase : Any = re_encoder_block_resnet.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_encoder_block_proj_out.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Any = re_encoder_block_proj_out.match(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = regex_match.groups()
_UpperCAmelCase : Tuple = F"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
_UpperCAmelCase : int = re_encoder_block_proj_out.sub(__lowerCAmelCase , __lowerCAmelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = re_decoder_block_conv_out.match(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = regex_match.groups()
_UpperCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase : Optional[Any] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
_UpperCAmelCase : List[Any] = re_decoder_block_conv_out.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_decoder_block_resnet.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = re_decoder_block_resnet.match(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = regex_match.groups()
_UpperCAmelCase : int = int(groups[2] ) * 2 + int(groups[3] ) - 2
_UpperCAmelCase : List[Any] = {"1": 1, "3": 2}[groups[-2]]
_UpperCAmelCase : Optional[Any] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
_UpperCAmelCase : Optional[int] = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_UpperCAmelCase : List[str] = prefix + resnet_block
_UpperCAmelCase : Dict = re_decoder_block_resnet.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_decoder_block_proj_in.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = re_decoder_block_proj_in.match(__lowerCAmelCase )
_UpperCAmelCase : Tuple = regex_match.groups()
_UpperCAmelCase : List[Any] = F"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
_UpperCAmelCase : Any = re_decoder_block_proj_in.sub(__lowerCAmelCase , __lowerCAmelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : List[str] = re_prior_cond_conv_out.match(__lowerCAmelCase )
_UpperCAmelCase : Tuple = regex_match.groups()
_UpperCAmelCase : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase : int = F"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
_UpperCAmelCase : Optional[Any] = re_prior_cond_conv_out.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_prior_cond_resnet.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : Dict = re_prior_cond_resnet.match(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = regex_match.groups()
_UpperCAmelCase : int = int(groups[1] ) * 2 + int(groups[2] ) - 2
_UpperCAmelCase : Tuple = {"1": 1, "3": 2}[groups[-2]]
_UpperCAmelCase : Optional[Any] = F"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
_UpperCAmelCase : Tuple = F"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
_UpperCAmelCase : Any = prefix + resnet_block
_UpperCAmelCase : int = re_prior_cond_resnet.sub(__lowerCAmelCase , __lowerCAmelCase )
elif re_prior_cond_proj_in.fullmatch(__lowerCAmelCase ):
_UpperCAmelCase : str = re_prior_cond_proj_in.match(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = regex_match.groups()
_UpperCAmelCase : Optional[Any] = F"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
_UpperCAmelCase : int = re_prior_cond_proj_in.sub(__lowerCAmelCase , __lowerCAmelCase )
# keep original key
else:
_UpperCAmelCase : int = original_key
_UpperCAmelCase : List[str] = replace_key(__lowerCAmelCase )
if F"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(F"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shape
elif value.shape != model_state_dict[F"""{key_prefix}.{key}"""].shape:
_UpperCAmelCase : Optional[int] = model_state_dict[F"""{key_prefix}.{key}"""]
print(F"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
_UpperCAmelCase : Tuple = original_key
_UpperCAmelCase : List[Any] = original_key
_UpperCAmelCase : Dict = value
return new_dict
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase=None , __lowerCAmelCase=None ) -> Optional[int]:
for file in MODEL_MAPPING[model_name.split("/" )[-1]]:
if not os.path.isfile(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
_UpperCAmelCase : Union[str, Any] = requests.get(F"""{PREFIX}{file}""" , allow_redirects=__lowerCAmelCase )
os.makedirs(F"""{pytorch_dump_folder_path}/""" , exist_ok=__lowerCAmelCase )
open(F"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , "wb" ).write(r.content )
_UpperCAmelCase : Union[str, Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_UpperCAmelCase : Dict = JukeboxConfig.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = JukeboxModel(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = {}
for i, dict_name in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = torch.load(F"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )["model"]
_UpperCAmelCase : List[Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_UpperCAmelCase : Optional[Any] = old_dic[k]
elif k.endswith(".w" ):
_UpperCAmelCase : Any = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_UpperCAmelCase : Tuple = old_dic[k]
else:
_UpperCAmelCase : List[Any] = old_dic[k]
_UpperCAmelCase : Dict = "vqvae" if i == 0 else F"""priors.{3 - i}"""
_UpperCAmelCase : List[str] = fix_jukebox_keys(__lowerCAmelCase , model.state_dict() , __lowerCAmelCase , __lowerCAmelCase )
weight_dict.append(__lowerCAmelCase )
_UpperCAmelCase : List[str] = weight_dict.pop(0 )
model.vqvae.load_state_dict(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
with open(F"""{pytorch_dump_folder_path}/mapping.json""" , "w" ) as txtfile:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(__lowerCAmelCase )
return weight_dict
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
lowerCamelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
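# Worked example for the helper above: it converts bytes to mebibytes, so
# 16 * 2**20 bytes (16_777_216) maps to 16; int() truncates partial MB downward.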
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels', which is the column name expected by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
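# Illustrative call, matching the invocation in the training function below
# (argument order inferred from that call site; the signature above is obfuscated):
#     train_dataloader, eval_dataloader = get_dataloaders(
#         accelerator, 16, "bert-base-cased", 320, 160)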
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember; we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
import copy
import re
class lowerCAmelCase__ :
lowerCAmelCase : Optional[Any] = "hp"
lowerCAmelCase : Tuple = {}
lowerCAmelCase : Dict = None
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = prefix
_UpperCAmelCase : Optional[Any] = defaults
cls.build_naming_info()
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str] ) ->List[Any]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
return ""
_UpperCAmelCase : Any = None
if any(char.isdigit() for char in word ):
raise Exception(F"""Parameters should not contain numbers: \'{word}\' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(lowerCamelCase__ ) + 1 ):
_UpperCAmelCase : List[str] = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
_UpperCAmelCase : int = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(lowerCamelCase__ : Optional[Any] ):
_UpperCAmelCase : Dict = ""
while integer != 0:
_UpperCAmelCase : Any = chr(ord("A" ) + integer % 10 ) + s
integer //= 10
return s
_UpperCAmelCase : Dict = 0
while True:
_UpperCAmelCase : Any = word + "#" + int_to_alphabetic(lowerCamelCase__ )
if sword in info["reverse_short_word"]:
continue
else:
_UpperCAmelCase : Dict = sword
break
_UpperCAmelCase : List[Any] = short_word
_UpperCAmelCase : Dict = word
return short_word
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : int , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = param_name.split("_" )
_UpperCAmelCase : Optional[int] = [TrialShortNamer.shortname_for_word(lowerCamelCase__ , lowerCamelCase__ ) for word in words]
# We try to create a separator-less short name, but if there is a collision we have to fall back
# to a separated short name
_UpperCAmelCase : Tuple = ["", "_"]
for separator in separators:
_UpperCAmelCase : str = separator.join(lowerCamelCase__ )
if shortname not in info["reverse_short_param"]:
_UpperCAmelCase : str = shortname
_UpperCAmelCase : int = param_name
return shortname
return param_name
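# Worked example of the shortening scheme (assuming no earlier collisions):
# "learning_rate" splits into ["learning", "rate"], each word shrinks to its
# shortest unused prefix ("l", "r"), and the separator-less join yields "lr".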
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = TrialShortNamer.shortname_for_key(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = short_name
_UpperCAmelCase : str = param_name
@classmethod
def lowerCAmelCase__ ( cls : int ) ->List[str]:
'''simple docstring'''
if cls.NAMING_INFO is not None:
return
_UpperCAmelCase : Optional[Any] = {
"short_word": {},
"reverse_short_word": {},
"short_param": {},
"reverse_short_param": {},
}
_UpperCAmelCase : List[str] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = info
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
cls.build_naming_info()
assert cls.PREFIX is not None
_UpperCAmelCase : Dict = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
_UpperCAmelCase : Any = cls.NAMING_INFO["short_param"][k]
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Tuple = 1 if v else 0
_UpperCAmelCase : Tuple = "" if isinstance(lowerCamelCase__ , (int, float) ) else "-"
_UpperCAmelCase : List[Any] = F"""{key}{sep}{v}"""
name.append(lowerCamelCase__ )
return "_".join(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : str , lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
_UpperCAmelCase : List[str] = []
else:
_UpperCAmelCase : List[str] = repr.split("_" )
_UpperCAmelCase : Optional[Any] = {}
for value in values:
if "-" in value:
_UpperCAmelCase : Optional[int] = value.split("-" )
else:
_UpperCAmelCase : Optional[int] = re.sub("[0-9.]" , "" , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = float(re.sub("[^0-9.]" , "" , lowerCamelCase__ ) )
_UpperCAmelCase : List[str] = cls.NAMING_INFO["reverse_short_param"][p_k]
_UpperCAmelCase : int = p_v
for k in cls.DEFAULTS:
if k not in parameters:
_UpperCAmelCase : Union[str, Any] = cls.DEFAULTS[k]
return parameters
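# Round-trip sketch (values hypothetical, method roles per their call sites):
# with PREFIX == "hp" and "lr" registered as the short name for "learning_rate",
# encoding {"learning_rate": 0.01} yields "hp_lr0.01" (numeric values get no
# separator), and decoding "hp_lr0.01" recovers {"learning_rate": 0.01} with
# the remaining DEFAULTS filled back in.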
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_receptance
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
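# Example renames produced by the rules above (keys illustrative):
#     "emb.weight"              -> "rwkv.embeddings.weight"
#     "blocks.0.att.time_mix_k" -> "rwkv.blocks.0.attention.time_mix_key"
#     "blocks.1.ffn.key.weight" -> "rwkv.blocks.1.feed_forward.key.weight"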
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
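# Illustrative invocation of this converter (script, repo and file names hypothetical):
#     python convert_rwkv_checkpoint_to_hf.py --repo_id some-user/rwkv-4-169m \
#         --checkpoint_file rwkv-4-169m.pth --output_dir ./rwkv-169m-hf --size 169M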
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 0
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( A_ ):
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[Any] ) ->int:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Any = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : int , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) ->List[Any]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : str = [sequences]
_UpperCAmelCase : Optional[int] = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
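# Worked example of the pairing above: one sequence and two labels with the
# default template "This example is {}." produce
#     [["some text", "This example is politics."],
#      ["some text", "This example is sports."]]
# i.e. one premise/hypothesis pair per (sequence, label) combination.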
@add_end_docstrings(A_ )
class lowerCAmelCase__ ( A_ ):
def __init__( self : List[Any] , lowerCamelCase__ : Dict=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Union[str, Any]=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : str = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# the tokenizer might complain that we want to truncate
# to a length that is not even reached by the input.
# In that case we don't want to truncate at all.
# Inspecting the exception message seems to be the only
# reliable way to catch that case.
_UpperCAmelCase : Dict = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : Any , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["""multi_class"""]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : int = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Any = kwargs["""hypothesis_template"""]
_UpperCAmelCase : List[Any] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Any = kwargs["""multi_label"""]
return preprocess_params, {}, postprocess_params
def __call__( self : Dict , lowerCamelCase__ : Union[str, Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : str , ) ->Tuple:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : Union[str, Any] = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[int]="This example is {}." ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Union[str, Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
_UpperCAmelCase : int = inputs["""candidate_label"""]
_UpperCAmelCase : int = inputs["""sequence"""]
_UpperCAmelCase : str = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : Optional[int] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : str = {
"""candidate_label""": candidate_label,
"""sequence""": sequence,
"""is_last""": inputs["""is_last"""],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any]=False ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = [outputs["""candidate_label"""] for outputs in model_outputs]
_UpperCAmelCase : Optional[int] = [outputs["""sequence"""] for outputs in model_outputs]
_UpperCAmelCase : Any = np.concatenate([output["logits"].numpy() for output in model_outputs] )
_UpperCAmelCase : List[Any] = logits.shape[0]
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ )
_UpperCAmelCase : int = N // n
_UpperCAmelCase : int = logits.reshape((num_sequences, n, -1) )
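# e.g. with 2 input sequences and 3 candidate labels, the stacked logits of
# shape (6, num_classes) are reshaped to (2, 3, num_classes): one row of
# entailment logits per (sequence, label) pair.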
if multi_label or len(lowerCamelCase__ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
_UpperCAmelCase : int = self.entailment_id
_UpperCAmelCase : str = -1 if entailment_id == 0 else 0
_UpperCAmelCase : int = reshaped_outputs[..., [contradiction_id, entailment_id]]
_UpperCAmelCase : Optional[int] = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : Tuple = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
_UpperCAmelCase : Dict = reshaped_outputs[..., self.entailment_id]
_UpperCAmelCase : str = np.exp(lowerCamelCase__ ) / np.exp(lowerCamelCase__ ).sum(-1 , keepdims=lowerCamelCase__ )
_UpperCAmelCase : int = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
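# Illustrative final output (scores hypothetical):
#     {"sequence": "...", "labels": ["sports", "politics"], "scores": [0.91, 0.09]}
# Labels are sorted by descending score; with multi_label=True each score is an
# independent entailment probability, so the scores need not sum to 1.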
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
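# Worked example of the schedule above: with use_ema_warmup disabled the decay
# is (1 + step) / (10 + step), so step 9 gives 10/19 ~= 0.526 and step 990
# gives 991/1000 = 0.991, rising toward (and clamped at) self.decay.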
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Release the stored copies to free memory.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 0
|
'''simple docstring'''
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNetaDModel
lowerCamelCase__ = HfApi()
lowerCamelCase__ = {}
# fmt: off
lowerCamelCase__ = torch.tensor([
-0.7_515, -1.6_883, 0.2_420, 0.0_300, 0.6_347, 1.3_433, -1.1_743, -3.7_467,
1.2_342, -2.2_485, 0.4_636, 0.8_076, -0.7_991, 0.3_969, 0.8_498, 0.9_189,
-1.8_887, -3.3_522, 0.7_639, 0.2_040, 0.6_271, -2.7_148, -1.6_316, 3.0_839,
0.3_186, 0.2_721, -0.9_759, -1.2_461, 2.6_257, 1.3_557
])
lowerCamelCase__ = torch.tensor([
-2.3_639, -2.5_344, 0.0_054, -0.6_674, 1.5_990, 1.0_158, 0.3_124, -2.1_436,
1.8_795, -2.5_429, -0.1_566, -0.3_973, 1.2_490, 2.6_447, 1.2_283, -0.5_208,
-2.8_154, -3.5_119, 2.3_838, 1.2_033, 1.7_201, -2.1_256, -1.4_576, 2.7_948,
2.4_204, -0.9_752, -1.2_546, 0.8_027, 3.2_758, 3.1_365
])
lowerCamelCase__ = torch.tensor([
-0.6_531, -0.6_891, -0.3_172, -0.5_375, -0.9_140, -0.5_367, -0.1_175, -0.7_869,
-0.3_808, -0.4_513, -0.2_098, -0.0_083, 0.3_183, 0.5_140, 0.2_247, -0.1_304,
-0.1_302, -0.2_802, -0.2_084, -0.2_025, -0.4_967, -0.4_873, -0.0_861, 0.6_925,
0.0_250, 0.1_290, -0.1_543, 0.6_316, 1.0_460, 1.4_943
])
lowerCamelCase__ = torch.tensor([
0.0_911, 0.1_107, 0.0_182, 0.0_435, -0.0_805, -0.0_608, 0.0_381, 0.2_172,
-0.0_280, 0.1_327, -0.0_299, -0.0_255, -0.0_050, -0.1_170, -0.1_046, 0.0_309,
0.1_367, 0.1_728, -0.0_533, -0.0_748, -0.0_534, 0.1_624, 0.0_384, -0.1_805,
-0.0_707, 0.0_642, 0.0_220, -0.0_134, -0.1_333, -0.1_505
])
lowerCamelCase__ = torch.tensor([
0.1_321, 0.1_337, 0.0_440, 0.0_622, -0.0_591, -0.0_370, 0.0_503, 0.2_133,
-0.0_177, 0.1_415, -0.0_116, -0.0_112, 0.0_044, -0.0_980, -0.0_789, 0.0_395,
0.1_502, 0.1_785, -0.0_488, -0.0_514, -0.0_404, 0.1_539, 0.0_454, -0.1_559,
-0.0_665, 0.0_659, 0.0_383, -0.0_005, -0.1_266, -0.1_386
])
lowerCamelCase__ = torch.tensor([
0.1_154, 0.1_218, 0.0_307, 0.0_526, -0.0_711, -0.0_541, 0.0_366, 0.2_078,
-0.0_267, 0.1_317, -0.0_226, -0.0_193, -0.0_014, -0.1_055, -0.0_902, 0.0_330,
0.1_391, 0.1_709, -0.0_562, -0.0_693, -0.0_560, 0.1_482, 0.0_381, -0.1_683,
-0.0_681, 0.0_661, 0.0_331, -0.0_046, -0.1_268, -0.1_431
])
lowerCamelCase__ = torch.tensor([
0.1_192, 0.1_240, 0.0_414, 0.0_606, -0.0_557, -0.0_412, 0.0_430, 0.2_042,
-0.0_200, 0.1_385, -0.0_115, -0.0_132, 0.0_017, -0.0_965, -0.0_802, 0.0_398,
0.1_433, 0.1_747, -0.0_458, -0.0_533, -0.0_407, 0.1_545, 0.0_419, -0.1_574,
-0.0_645, 0.0_626, 0.0_341, -0.0_010, -0.1_199, -0.1_390
])
lowerCamelCase__ = torch.tensor([
0.1_075, 0.1_074, 0.0_205, 0.0_431, -0.0_774, -0.0_607, 0.0_298, 0.2_042,
-0.0_320, 0.1_267, -0.0_281, -0.0_250, -0.0_064, -0.1_091, -0.0_946, 0.0_290,
0.1_328, 0.1_650, -0.0_580, -0.0_738, -0.0_586, 0.1_440, 0.0_337, -0.1_746,
-0.0_712, 0.0_605, 0.0_250, -0.0_099, -0.1_316, -0.1_473
])
lowerCamelCase__ = torch.tensor([
-1.4_572, -2.0_481, -0.0_414, -0.6_005, 1.4_136, 0.5_848, 0.4_028, -2.7_330,
1.2_212, -2.1_228, 0.2_155, 0.4_039, 0.7_662, 2.0_535, 0.7_477, -0.3_243,
-2.1_758, -2.7_648, 1.6_947, 0.7_026, 1.2_338, -1.6_078, -0.8_682, 2.2_810,
1.8_574, -0.5_718, -0.5_586, -0.0_186, 2.3_415, 2.1_251])
lowerCamelCase__ = torch.tensor([
-1.3_690, -1.9_720, -0.4_090, -0.6_966, 1.4_660, 0.9_938, -0.1_385, -2.7_324,
0.7_736, -1.8_917, 0.2_923, 0.4_293, 0.1_693, 1.4_112, 1.1_887, -0.3_181,
-2.2_160, -2.6_381, 1.3_170, 0.8_163, 0.9_240, -1.6_544, -0.6_099, 2.5_259,
1.6_430, -0.9_090, -0.9_392, -0.0_126, 2.4_268, 2.3_266
])
lowerCamelCase__ = torch.tensor([
-1.3_525, -1.9_628, -0.3_956, -0.6_860, 1.4_664, 1.0_014, -0.1_259, -2.7_212,
0.7_772, -1.8_811, 0.2_996, 0.4_388, 0.1_704, 1.4_029, 1.1_701, -0.3_027,
-2.2_053, -2.6_287, 1.3_350, 0.8_131, 0.9_274, -1.6_292, -0.6_098, 2.5_131,
1.6_505, -0.8_958, -0.9_298, -0.0_151, 2.4_257, 2.3_355
])
lowerCamelCase__ = torch.tensor([
-2.0_585, -2.7_897, -0.2_850, -0.8_940, 1.9_052, 0.5_702, 0.6_345, -3.8_959,
1.5_932, -3.2_319, 0.1_974, 0.0_287, 1.7_566, 2.6_543, 0.8_387, -0.5_351,
-3.2_736, -4.3_375, 2.9_029, 1.6_390, 1.4_640, -2.1_701, -1.9_013, 2.9_341,
3.4_981, -0.6_255, -1.1_644, -0.1_591, 3.7_097, 3.2_066
])
lowerCamelCase__ = torch.tensor([
-2.3_139, -2.5_594, -0.0_197, -0.6_785, 1.7_001, 1.1_606, 0.3_075, -2.1_740,
1.8_071, -2.5_630, -0.0_926, -0.3_811, 1.2_116, 2.6_246, 1.2_731, -0.5_398,
-2.8_153, -3.6_140, 2.3_893, 1.3_262, 1.6_258, -2.1_856, -1.3_267, 2.8_395,
2.3_779, -1.0_623, -1.2_468, 0.8_959, 3.3_367, 3.2_243
])
lowerCamelCase__ = torch.tensor([
-2.0_628, -2.7_667, -0.2_089, -0.8_263, 2.0_539, 0.5_992, 0.6_495, -3.8_336,
1.6_025, -3.2_817, 0.1_721, -0.0_633, 1.7_516, 2.7_039, 0.8_100, -0.5_908,
-3.2_113, -4.4_343, 2.9_257, 1.3_632, 1.5_562, -2.1_489, -1.9_894, 3.0_560,
3.3_396, -0.7_328, -1.0_417, 0.0_383, 3.7_093, 3.2_343
])
lowerCamelCase__ = torch.tensor([
-1.4_574, -2.0_569, -0.0_473, -0.6_117, 1.4_018, 0.5_769, 0.4_129, -2.7_344,
1.2_241, -2.1_397, 0.2_000, 0.3_937, 0.7_616, 2.0_453, 0.7_324, -0.3_391,
-2.1_746, -2.7_744, 1.6_963, 0.6_921, 1.2_187, -1.6_172, -0.8_877, 2.2_439,
1.8_471, -0.5_839, -0.5_605, -0.0_464, 2.3_250, 2.1_219
])
# fmt: on
lowerCamelCase__ = api.list_models(filter='diffusers')
for mod in models:
if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
lowerCamelCase__ = '/home/patrick/google_checkpoints/' + mod.modelId.split('/')[-1]
print(F'''Started running {mod.modelId}!!!''')
if mod.modelId.startswith('CompVis'):
lowerCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint, subfolder='unet')
else:
lowerCamelCase__ = UNetaDModel.from_pretrained(local_checkpoint)
torch.manual_seed(0)
random.seed(0)
lowerCamelCase__ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
lowerCamelCase__ = torch.tensor([10] * noise.shape[0])
with torch.no_grad():
lowerCamelCase__ = model(noise, time_step).sample
assert torch.allclose(
logits[0, 0, 0, :30], results['_'.join('_'.join(mod.modelId.split('/')).split('-'))], atol=1e-3
)
print(F'''{mod.modelId} has passed successfully!!!''')
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for transfer-learned'
' distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 40
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[Any]=10 , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Dict=64 , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : Optional[int] = use_auxiliary_loss
_UpperCAmelCase : Union[str, Any] = num_queries
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Any = max_size
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : Tuple = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase_ )
_UpperCAmelCase : Optional[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase_ )
_UpperCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase_ ) > 0.5
).float()
_UpperCAmelCase : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase_ ) > 0.5).long()
_UpperCAmelCase : Any = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : List[str] = self.num_labels
_UpperCAmelCase : Optional[Any] = [1, 1, 1, 1]
_UpperCAmelCase : str = self.num_channels
_UpperCAmelCase : Any = 64
_UpperCAmelCase : str = 1_28
_UpperCAmelCase : Tuple = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = self.prepare_config_and_inputs()
_UpperCAmelCase : Any = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = output.encoder_hidden_states
_UpperCAmelCase : int = output.pixel_decoder_hidden_states
_UpperCAmelCase : List[str] = output.transformer_decoder_hidden_states
self.parent.assertEqual(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(lowerCamelCase_ ) , len(config.backbone_config.depths ) )
self.parent.assertEqual(len(lowerCamelCase_ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=False ) ->Tuple:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
_UpperCAmelCase : str = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ )
_UpperCAmelCase : int = model(lowerCamelCase_ , output_hidden_states=lowerCamelCase_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase_ , lowerCamelCase_ )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : str , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = MaskaFormerForUniversalSegmentation(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : List[Any] ):
# let's still check that all the required outputs are there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# now we need to check the logits shape
# due to the encoder compression, masks have a spatial size reduced by a factor of 4
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ )
_UpperCAmelCase : List[str] = model(lowerCamelCase_ )
comm_check_on_output(lowerCamelCase_ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase_ , pixel_mask=lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ )
comm_check_on_output(lowerCamelCase_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( a__ , a__ , unittest.TestCase ):
lowerCAmelCase : Tuple = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : Any = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : str = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Dict = False
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerModelTester(self )
_UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase_ , has_text_modality=lowerCamelCase_ )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ )
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase_ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase_ )
_UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase_ )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : Optional[Any] = MaskaFormerModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
_UpperCAmelCase : str = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase_ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase_ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase_ ).long(),
}
_UpperCAmelCase : Any = self.model_tester.get_config()
_UpperCAmelCase : Any = MaskaFormerForUniversalSegmentation(lowerCamelCase_ ).to(lowerCamelCase_ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase_ )
self.assertTrue(outputs.loss is not None )
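        # Random mask_labels and all-zero class_labels are enough here: the test
        # only asserts that a scalar matching-based loss is produced, not its value.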
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase_ , **lowerCamelCase_ , output_hidden_states=lowerCamelCase_ )
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(lowerCamelCase_ ).to(lowerCamelCase_ )
_UpperCAmelCase : Any = model(**lowerCamelCase_ , output_attentions=lowerCamelCase_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Union[str, Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.train()
_UpperCAmelCase : List[Any] = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ ).loss
loss.backward()
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase_ ).to(lowerCamelCase_ )
model.train()
_UpperCAmelCase : str = model(lowerCamelCase_ , mask_labels=lowerCamelCase_ , class_labels=lowerCamelCase_ )
_UpperCAmelCase : int = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Any = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Any = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
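# Note on the block above: encoder/pixel-decoder/transformer-decoder hidden
# states and attentions are non-leaf tensors, so PyTorch discards their .grad
# unless Tensor.retain_grad() is called before backward(); the assertions then
# confirm that gradients really flow back through every sub-module.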
lowerCamelCase__ = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase_ )
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
_UpperCAmelCase : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : Dict = model(**lowerCamelCase_ )
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
_UpperCAmelCase : Any = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase_ ).eval()
_UpperCAmelCase : Any = self.default_image_processor
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : int = image_processor(lowerCamelCase_ , return_tensors="pt" ).to(lowerCamelCase_ )
_UpperCAmelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase_ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : Any = model(**lowerCamelCase_ )
# masks_queries_logits
_UpperCAmelCase : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : Dict = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : Any = torch.tensor(lowerCamelCase_ ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
# class_queries_logits
_UpperCAmelCase : int = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase_ , atol=lowerCamelCase_ ) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase_ ).eval()
_UpperCAmelCase : str = self.default_image_processor
_UpperCAmelCase : Dict = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : Optional[Any] = inputs["pixel_values"].to(lowerCamelCase_ )
_UpperCAmelCase : Optional[Any] = [el.to(lowerCamelCase_ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : Tuple = [el.to(lowerCamelCase_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCamelCase_ )
self.assertTrue(outputs.loss is not None )
| 709
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__( self : int , graph : dict[str, list[str]] , source_vertex : str ) ->None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent : dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breadth_first_search( self : Optional[int] ) ->None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self : Tuple , target_vertex : str ) ->str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
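    # parent[] is filled in by breadth_first_search above; since BFS visits an
    # unweighted graph level by level, walking parent pointers from a reached
    # vertex back to the source reconstructs a shortest path.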
if __name__ == "__main__":
    g = Graph(graph, 'G')
    g.breadth_first_search()
    print(g.shortest_path('D'))
    print(g.shortest_path('G'))
    print(g.shortest_path('Foo'))
| 40
| 0
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__( self : int , parent : List[Any] , batch_size : List[str]=13 , num_channels : List[Any]=3 , is_training : List[Any]=True , use_labels : Optional[Any]=True , hidden_dropout_prob : Any=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , image_size : Any=2_24 , num_labels : List[str]=10_00 , layer_depths : List[Any]=[3, 3, 6, 4] , embed_dims : Union[str, Any]=[48, 56, 1_12, 2_20] , ) ->Optional[int]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs( self : Tuple ) ->Union[str, Any]:
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self : Tuple ) ->Dict:
        '''simple docstring'''
        return SwiftFormerConfig(
            depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=True , layer_scale_init_value=1E-5 , )
    def create_and_check_model( self : int , config : List[Any] , pixel_values : Union[str, Any] , labels : Optional[int] ) ->Optional[int]:
        '''simple docstring'''
        model = SwiftFormerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
    def create_and_check_for_image_classification( self : Tuple , config : Union[str, Any] , pixel_values : Union[str, Any] , labels : Optional[Any] ) ->Optional[Any]:
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
        model = SwiftFormerForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common( self : str ) ->List[str]:
        '''simple docstring'''
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase : Dict = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase : Any = (
{'''feature-extraction''': SwiftFormerModel, '''image-classification''': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : List[str] = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = SwiftFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(
self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(UpperCamelCase__ )
_UpperCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(UpperCamelCase__ )
_UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Any = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = SwiftFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ):
_UpperCAmelCase : Tuple = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase : List[Any] = outputs.hidden_states
_UpperCAmelCase : List[str] = 8
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(UpperCamelCase__ ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : List[Any] = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
        def _config_zero_init(config ):
            configs_no_init = copy.deepcopy(config )
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init , key , 1E-10 )
                if isinstance(getattr(configs_no_init , key , None ) , PretrainedConfig ):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init , key ) )
                    setattr(configs_no_init , key , no_init_subconfig )
            return configs_no_init
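        # With every *_range/*_std/layer_scale entry forced to ~0 above, any
        # parameter whose mean is not exactly 0.0 or 1.0 after init must have
        # bypassed the config-driven initializer, which is what the loop below
        # asserts.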
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9) / 1E9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(UpperCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=UpperCamelCase__ , return_tensors="pt" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**UpperCamelCase__ )
# verify the logits
_UpperCAmelCase : Tuple = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = torch.tensor([[-2.1703E00, 2.1107E00, -2.0811E00]] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 710
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self : Any , image_processor : Tuple , tokenizer : int ) ->Optional[int]:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : Dict , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Tuple , ) ->BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self : List[Any] , *args : Optional[int] , **kwargs : Dict ) ->Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : int , *args : Dict , **kwargs : str ) ->Optional[int]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self : Any ) ->Optional[int]:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
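# Illustrative round trip for the processor above (assuming it mirrors the
# BLIP-2 processor; the checkpoint name is only an example):
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=image, text="a photo of", return_tensors="pt")
# Text-only calls return the tokenizer's BatchEncoding; calls with images add
# "pixel_values" from the image processor.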
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force , area , distance ):
    '''simple docstring'''
    if (force, area, distance).count(0 ) != 1:
        raise ValueError("One and only one argument must be 0" )
    if force < 0:
        raise ValueError("Magnitude of force can not be negative" )
    if distance < 0:
        raise ValueError("Distance can not be negative" )
    if area < 0:
        raise ValueError("Area can not be negative" )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0" )
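# The three branches above solve the ideal-plate Casimir relation
#     F = (hbar * c * pi**2 * A) / (240 * d**4)
# for whichever argument was passed as 0.  Illustrative check (values assumed,
# not from the original docstring):
#     casimir_force(force=0, area=4, distance=0.03)   # -> {'force': ~6.42e-21}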
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711
|
'''simple docstring'''
def compute_ap(l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
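# For the adjacency list above, the articulation points are 2, 3 and 5:
# removing any one of them disconnects the graph, so compute_ap should print
# those three vertex indices.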
| 40
| 0
|
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file ):
    print("Loading config file..." )
    def flatten_yaml_as_dict(d , parent_key="" , sep="." ):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v , collections.abc.MutableMapping ):
                items.extend(flatten_yaml_as_dict(v , new_key , sep=sep ).items() )
            else:
                items.append((new_key, v) )
        return dict(items )
    config = argparse.Namespace()
    with open(orig_cfg_file , "r" ) as yaml_file:
        try:
            cfg = yaml.load(yaml_file , Loader=yaml.FullLoader )
            flat_cfg = flatten_yaml_as_dict(cfg )
            for k, v in flat_cfg.items():
                setattr(config , k , v )
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file , str(exc ) ) )
    return config
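# Flattening the nested YAML into dotted keys makes every config value a single
# attribute on the Namespace, which is why the code below can read values with
# e.g. getattr(config, "model.classification.mitv2.width_multiplier", 1.0).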
def get_mobilevitva_config(task_name , orig_cfg_file ):
    config = MobileViTVaConfig()
    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_" ):
        config.num_labels = 1_000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_" ):
        config.num_labels = 21_000
        if int(task_name.strip().split("_" )[-1] ) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_" ):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_" ):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True
    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file )
    assert getattr(orig_config , "model.classification.name" , -1 ) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config , "model.classification.mitv2.width_multiplier" , 1.0 )
    assert (
        getattr(orig_config , "model.classification.mitv2.attn_norm_layer" , -1 ) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config , "model.classification.activation.name" , "swish" )
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
    if is_segmentation_model:
        config.output_stride = getattr(orig_config , "model.segmentation.output_stride" , 16 )
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config , "model.segmentation.deeplabv3.aspp_rates" , [12, 24, 36] )
            config.aspp_out_channels = getattr(orig_config , "model.segmentation.deeplabv3.aspp_out_channels" , 512 )
            config.aspp_dropout_prob = getattr(orig_config , "model.segmentation.deeplabv3.aspp_dropout" , 0.1 )
    # id2label
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
    idalabel = {int(k ): v for k, v in idalabel.items()}
    config.idalabel = idalabel
    config.labelaid = {v: k for k, v in idalabel.items()}
    return config
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict , base_model=False ):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."
    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block." , "." )
        if ".conv." in k:
            k_new = k_new.replace(".conv." , ".convolution." )
        if ".norm." in k:
            k_new = k_new.replace(".norm." , ".normalization." )
        if "conv_1." in k:
            k_new = k_new.replace("conv_1." , F"""{model_prefix}conv_stem.""" )
        for i in [1, 2]:
            if F"""layer_{i}.""" in k:
                k_new = k_new.replace(F"""layer_{i}.""" , F"""{model_prefix}encoder.layer.{i-1}.layer.""" )
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1." , ".expand_1x1." )
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1." , ".reduce_1x1." )
        for i in [3, 4, 5]:
            if F"""layer_{i}.0.""" in k:
                k_new = k_new.replace(F"""layer_{i}.0.""" , F"""{model_prefix}encoder.layer.{i-1}.downsampling_layer.""" )
            if F"""layer_{i}.1.local_rep.0.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.local_rep.0.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_kxk.""" )
            if F"""layer_{i}.1.local_rep.1.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.local_rep.1.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_1x1.""" )
        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]
            for j in j_in:
                if F"""layer_{i}.1.global_rep.{j}.""" in k:
                    k_new = k_new.replace(
                        F"""layer_{i}.1.global_rep.{j}.""" , F"""{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.""" )
                if F"""layer_{i}.1.global_rep.{j+1}.""" in k:
                    k_new = k_new.replace(
                        F"""layer_{i}.1.global_rep.{j+1}.""" , F"""{model_prefix}encoder.layer.{i-1}.layernorm.""" )
            if F"""layer_{i}.1.conv_proj.""" in k:
                k_new = k_new.replace(F"""layer_{i}.1.conv_proj.""" , F"""{model_prefix}encoder.layer.{i-1}.conv_projection.""" )
        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0." , "layernorm_before." )
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1." , "attention." )
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0." , "layernorm_after." )
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1." , "ffn.conv1." )
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3." , "ffn.conv2." )
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1." , "classifier." )
        if "seg_head." in k:
            k_new = k_new.replace("seg_head." , "segmentation_head." )
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer." , "." )
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool." , "." )
        rename_keys.append((k, k_new) )
    return rename_keys
def remove_unused_keys(state_dict ):
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head." ):
            keys_to_ignore.append(k )
    for k in keys_to_ignore:
        state_dict.pop(k , None )
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name , checkpoint_path , orig_config_path , pytorch_dump_folder_path ):
    config = get_mobilevitva_config(task_name , orig_config_path )
    # load original state_dict
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    # load huggingface model
    if task_name.startswith("ade20k_" ) or task_name.startswith("voc_" ):
        model = MobileViTVaForSemanticSegmentation(config ).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config ).eval()
        base_model = False
    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict )
    rename_keys = create_rename_keys(state_dict , base_model=base_model )
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict , rename_key_src , rename_key_dest )
    # load modified state_dict
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
    encoding = image_processor(images=prepare_img() , return_tensors="pt" )
    outputs = model(**encoding )
    # verify classification model
    if task_name.startswith("imagenet" ):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.idalabel[predicted_class_idx] )
        if task_name.startswith("imagenet1k_256" ) and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
            assert torch.allclose(logits[0, :3] , expected_logits , atol=1e-4 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F"""Saving model {task_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task',
default='imagenet1k_256',
type=str,
help=(
'Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '
'\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n '
),
choices=[
'imagenet1k_256',
'imagenet1k_384',
'imagenet21k_to_1k_256',
'imagenet21k_to_1k_384',
'ade20k_deeplabv3',
'voc_deeplabv3',
],
)
parser.add_argument(
'--orig_checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument('--orig_config_path', required=True, type=str, help='Path to the original config file.')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
lowerCamelCase__ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 712
|
'''simple docstring'''
def solution():
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
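# Project Euler 48: the loop forms the self-powers series 1**1 + 2**2 + ... +
# 1000**1000 exactly (Python integers are arbitrary precision) and the string
# slice keeps only the last ten digits.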
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__( self : List[Any] , degree : int , coefficients : MutableSequence[float] ) ->None:
        '''simple docstring'''
        if len(coefficients ) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1." )
        self.coefficients = list(coefficients )
        self.degree = degree
    def __add__( self : Tuple , polynomial_a : Polynomial ) ->Polynomial:
        '''simple docstring'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self : Optional[Any] , polynomial_a : Polynomial ) ->Polynomial:
        '''simple docstring'''
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self : str ) ->Polynomial:
        '''simple docstring'''
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self : Any , polynomial_a : Polynomial ) ->Polynomial:
        '''simple docstring'''
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self : List[Any] , substitution : float ) ->float:
        '''simple docstring'''
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self : Optional[int] ) ->str:
        '''simple docstring'''
        polynomial = ""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self : Optional[Any] ) ->str:
        '''simple docstring'''
        return self.__str__()
    def derivative( self : List[Any] ) ->Polynomial:
        '''simple docstring'''
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self : List[Any] , constant : float = 0 ) ->Polynomial:
        '''simple docstring'''
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self : int , polynomial_a : object ) ->bool:
        '''simple docstring'''
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self : List[str] , polynomial_a : object ) ->bool:
        '''simple docstring'''
        return not self.__eq__(polynomial_a )
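# Illustrative usage (coefficients[i] is the coefficient of x**i; the example
# is not part of the original module):
#     p = Polynomial(2, [1, 0, 3])   # 3x^2 + 1
#     p.evaluate(2)                  # -> 13
#     str(p.derivative())            # -> '6x'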
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
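# Illustrative call (arrays are assumed to share dtype and shape[1]):
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.0, 1.0]])
#     similarity_search(dataset, value_array)   # -> [[[0.0, 0.0], 1.0]]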
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 0
|
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset )
        self.assertTrue(isinstance(dc.token_ids , list ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
    def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
        '''simple docstring'''
        cset = [[1, 2], [1, 2, 3, 4]]
        with self.assertRaises(ValueError ):
            DisjunctiveConstraint(cset )  # fails here
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->List[str]:
        '''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(3 )
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset )
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(4 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.current_seq == [1, 2, 4] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
        dc.reset()
        stepped , completed , reset = dc.update(1 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 3 )
        self.assertTrue(dc.current_seq == [1] )
        stepped , completed , reset = dc.update(2 )
        self.assertTrue(not dc.completed )
        self.assertTrue(dc.remaining() == 2 )
        self.assertTrue(dc.current_seq == [1, 2] )
        stepped , completed , reset = dc.update(5 )
        self.assertTrue(dc.completed )  # Completed!
        self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
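# A DisjunctiveConstraint is fulfilled as soon as any one of its candidate
# token sequences has been generated in full; update() returns the
# (stepped, completed, reset) triple so beam search can track partial
# progress, and reset() rewinds the walk, as the second test above verifies.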
| 714
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self : str , comment : List[str] , class_name : Optional[Any] , class_code : Union[str, Any] , overwrite_result : Any=None ) ->List[str]:
        '''simple docstring'''
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(code , REFERENCE_CODE )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , REFERENCE_CODE , overwrite_result=re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
| 40
| 0
|
'''simple docstring'''
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowerCamelCase__ = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets(datasets , probabilities = None , seed = None , info = None , split = None , stopping_strategy = "first_exhausted" , ):
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets." )
    for i, dataset in enumerate(datasets ):
        if not isinstance(dataset , (Dataset, IterableDataset) ):
            if isinstance(dataset , (DatasetDict, IterableDatasetDict) ):
                if not dataset:
                    raise ValueError(
                        F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} """
                        "is an empty dataset dictionary." )
                raise ValueError(
                    F"""Dataset at position {i} has at least one split: {list(dataset )}\n"""
                    F"""Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset ) )}']""" )
            raise ValueError(
                F"""Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset ).__name__}.""" )
        if i == 0:
            dataset_type , other_type = (
                (Dataset, IterableDataset) if isinstance(dataset , Dataset ) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset , dataset_type ):
            raise ValueError(
                F"""Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.""" )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(F"""{stopping_strategy} is not supported. Please enter a valid stopping_strategy.""" )
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
    else:
        return _interleave_iterable_datasets(
            datasets , probabilities , seed , info=info , split=split , stopping_strategy=stopping_strategy )
def concatenate_datasets(dsets, info=None, split=None, axis=0):
    from .arrow_dataset import Dataset, _concatenate_map_style_datasets
    from .dataset_dict import DatasetDict, IterableDatasetDict
    from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets

    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to concatenate with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to concatenate a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
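

# A minimal usage sketch (added for illustration, not part of the original
# module; it assumes the public `datasets` package is installed and simply
# exercises the two functions above on tiny in-memory datasets):
if __name__ == "__main__":
    from datasets import Dataset

    d1 = Dataset.from_dict({"a": [0, 1, 2]})
    d2 = Dataset.from_dict({"a": [10, 11, 12]})

    # Randomly alternate between the two sources; "all_exhausted" keeps
    # sampling until every dataset has been fully seen at least once.
    mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42, stopping_strategy="all_exhausted")
    # Stack the rows of d2 below the rows of d1.
    combined = concatenate_datasets([d1, d2])
    print(mixed["a"], combined["a"])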
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # Drop trailing zero coefficients from the dual part.
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        # Polynomial-style convolution of the dual coefficients.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
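    # Added sanity check (illustrative, not in the original file): for
    # g(y) = y**6 the second derivative is 30 * y**4, so differentiate(g, 9, 2)
    # should equal 30 * 9**4 = 196830.
    def g(y):
        return y**6

    assert differentiate(g, 9, 2) == 196830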
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum,
    power,
    current_number,
    current_sum,
    solutions_count,
):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
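    # Illustrative check (added): 13 = 2**2 + 3**2 is the only way to write 13
    # as a sum of squares of distinct natural numbers, so this should print 1.
    print(solve(13, 2))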
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
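

# Example invocation (added for illustration): running
#   pytest tests/ --make-reports=run1
# triggers the hook above, which writes the detailed per-test report files
# for that run via pytest_terminal_summary_main.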
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names
        # gives the same results for the defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
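

# Standalone usage sketch (added for illustration; `unet` stands in for any
# epsilon-prediction model, mirroring full_loop above):
#   scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#   scheduler.set_timesteps(num_inference_steps=10)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample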
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample

    def test_timesteps(self):
        for timesteps in [100, 500, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps, torch.LongTensor([801, 601, 401, 201, 1]))

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_timestep_spacing(self):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)

    def test_rescale_betas_zero_snr(self):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_time_indices(self):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]):
            self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps)

    def test_eta(self):
        for t, eta in zip([1, 10, 49], [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t, eta=eta)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(420, 400) - 0.14771)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(980, 960) - 0.32460)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(0, 0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487, 486) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999, 998) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1), eta)

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1147.7904) < 1e-2
        assert abs(result_mean.item() - 0.4982) < 1e-3

    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
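

# Usage note (added for illustration): unlike plain DDIM, the parallel variant
# also exposes batch_step_no_noise, which denoises a batch of samples at
# several timesteps in one call, as exercised in test_batch_step_no_noise:
#   pred = scheduler.batch_step_no_noise(residual, timesteps, samples, eta)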
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
'''simple docstring'''
import os
def solution():
    """Return the first ten digits of the sum of the numbers in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; multiple copies of the same prompt are sent sequentially."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }


class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last, usually incomplete, block of generated code after an EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple code completions for each task and regroup them by task id."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
def solution(n: int = 100) -> int:
    # (n * (n + 1) // 2) ** 2 is the square of the sum 1 + 2 + ... + n
    # (and, by Nicomachus's theorem, also the sum of the first n cubes).
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
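

# Quick check (added for illustration): for n = 10 this gives
# 55**2 - 385 = 3025 - 385 = 2640.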
if __name__ == "__main__":
print(F'''{solution() = }''')
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
def topological_sort(graph):
    """Topologically sort a directed acyclic graph using Kahn's algorithm."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(indegree)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)

        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
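# With the graph above this prints [0, 1, 2, 3, 4, 5], one valid topological
# order: every vertex appears before all vertices it points to.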
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
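

# Illustrative call (added; the repo name is hypothetical):
# hf_hub_url("user/my_dataset", "data/train.csv") resolves to the download URL
# of that file inside the dataset repository on the Hugging Face Hub.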
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'mgp-str': 'https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json',
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'mgp-str': 27}


class MgpstrTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        # character-level tokenization
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        return (vocab_file,)
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass

    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
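

# Usage sketch (added for illustration; requires timm to be installed, and the
# backbone name is just an example):
#   config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps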
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/data2vec-vision-base-ft': (
        'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = ["image_processor", "tokenizer"]
lowerCAmelCase : Tuple = "CLIPImageProcessor"
lowerCAmelCase : Dict = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : int , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase__ , )
_UpperCAmelCase : List[str] = kwargs.pop("feature_extractor" )
_UpperCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self : List[Any] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : int=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCAmelCase : Union[str, Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if images is not None:
_UpperCAmelCase : Union[str, Any] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and images is not None:
_UpperCAmelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCAmelCase : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
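# The property above merges the tokenizer's and the image processor's expected input
# names, deduplicating while preserving order via dict.fromkeys.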
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
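# (the helper above converts a byte count to mebibytes: 2**20 bytes = 1 MiB)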
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
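# Typical usage, mirroring the training loop further below:
#   with TorchTracemalloc() as tracemalloc:
#       ...  # training steps
#   # then read tracemalloc.used / tracemalloc.peaked (memory deltas in MiB)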
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
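# (both loaders returned above are later wrapped by accelerator.prepare(), which
#  handles device placement and distributed sharding)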
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
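# Example of the renaming performed above (hypothetical key, for illustration only):
#   "blocks.0.att.time_mix_k"  ->  "rwkv.blocks.0.attention.time_mix_key"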
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 0
|
'''simple docstring'''
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def __lowerCAmelCase ():
_UpperCAmelCase : Dict = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=__lowerCAmelCase , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=__lowerCAmelCase , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=__lowerCAmelCase )
return parser.parse_args()
def __lowerCAmelCase ():
_UpperCAmelCase : List[str] = parse_args()
# Import training_script as a module.
_UpperCAmelCase : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase : Any = script_fpath.stem
_UpperCAmelCase : Tuple = importlib.import_module(__lowerCAmelCase )
# Patch sys.argv
_UpperCAmelCase : Union[str, Any] = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if not all(char in "01" for char in bin_string ):
raise ValueError("Non-binary value was passed to the function" )
if not bin_string:
raise ValueError("Empty string was passed to the function" )
_UpperCAmelCase : List[Any] = ""
while len(__lowerCAmelCase ) % 3 != 0:
_UpperCAmelCase : Tuple = "0" + bin_string
_UpperCAmelCase : Dict = [
bin_string[index : index + 3]
for index in range(len(__lowerCAmelCase ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
_UpperCAmelCase : int = 0
for index, val in enumerate(__lowerCAmelCase ):
oct_val += int(2 ** (2 - index) * int(__lowerCAmelCase ) )
oct_string += str(__lowerCAmelCase )
return oct_string
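# Worked example of the loop above: "111100" splits into ["111", "100"], which map to
# octal digits 7 (4+2+1) and 4 (4+0+0), so the function returns "74".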
if __name__ == "__main__":
from doctest import testmod
testmod()
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
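    # e.g. with use_ema_warmup disabled the schedule is (1 + step) / (10 + step): step 90
    # yields 0.91, which is then clamped to at most `decay` and at least `min_decay`.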
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if index == number_of_items:
return 0
_UpperCAmelCase : int = 0
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Tuple = knapsack(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index + 1 )
if weights[index] <= max_weight:
_UpperCAmelCase : List[Any] = values[index] + knapsack(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_weight - weights[index] , index + 1 )
return max(__lowerCAmelCase , __lowerCAmelCase )
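# Sketch of a call, assuming the argument order (values, weights, number_of_items,
# max_weight, index): with values [10, 15, 40], weights [1, 2, 3] and max_weight 6,
# all three items fit and the recursion returns 65.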
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
| 40
| 0
|
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
    def __init__( self : Union[str, Any] , features : Dict=None , **torch_tensor_kwargs : int ) ->List[Any]:
        '''simple docstring'''
        super().__init__(features=features )
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization
    def _consolidate( self : Optional[int] , column : List[str] ) ->Dict:
        '''simple docstring'''
        import torch
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column ):
                return torch.stack(column )
        return column
    def _tensorize( self : str , value : Dict ) ->Dict:
        '''simple docstring'''
        import torch
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        return torch.tensor(value , **{**default_dtype, **self.torch_tensor_kwargs} )
    def _recursive_tensorize( self : Dict , data_struct : int ) ->Union[str, Any]:
        '''simple docstring'''
        import torch
        # support for torch, tf, jax etc.
        if hasattr(data_struct , "__array__" ) and not isinstance(data_struct , torch.Tensor ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self : Union[str, Any] , data_struct : dict ) ->Tuple:
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self : Tuple , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self : int , pa_table : pa.Table ) ->"torch.Tensor":
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self : Dict , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
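# Usage sketch (not part of the class above; assumes the public `datasets` API —
# inside the real library this would be a circular import, so treat it as a
# standalone snippet): the formatter is normally reached through
# `Dataset.set_format("torch")`, after which indexing returns torch tensors.
if __name__ == "__main__":
    from datasets import Dataset
    demo_ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
    demo_ds.set_format("torch")
    print(type(demo_ds[0]["x"]))  # <class 'torch.Tensor'>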
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph :
    def __init__( self : int , graph : dict[str, list[str]] , source_vertex : str ) ->None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent : dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self : Optional[int] ) ->None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self : Tuple , target_vertex : str ) ->str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
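# Note: the last call above raises ValueError, since 'Foo' is not in the graph.
# A standalone sketch (hypothetical helper, not part of the class) of the same
# parent-map idea, reconstructing the path iteratively instead of recursively:
from collections import deque

def bfs_path(adj, src, dst):
    parent = {src: None}
    queue = deque([src])
    while queue:
        vertex = queue.popleft()
        for neighbour in adj[vertex]:
            if neighbour not in parent:
                parent[neighbour] = vertex
                queue.append(neighbour)
    if dst not in parent:
        raise ValueError(f"No path from vertex: {src} to vertex: {dst}")
    path = [dst]
    while parent[path[-1]] is not None:
        path.append(parent[path[-1]])
    return "->".join(reversed(path))

# bfs_path(graph, "G", "D") == 'G->C->A->B->D'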
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'edbeeching/decision-transformer-gym-hopper-medium': (
'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig( PretrainedConfig ):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__( self : List[Any] , state_dim : List[Any]=17 , act_dim : Union[str, Any]=4 , hidden_size : Tuple=1_28 , max_ep_len : str=40_96 , action_tanh : Any=True , vocab_size : List[str]=1 , n_positions : List[str]=10_24 , n_layer : List[str]=3 , n_head : Optional[Any]=1 , n_inner : Optional[int]=None , activation_function : Optional[int]="relu" , resid_pdrop : Any=0.1 , embd_pdrop : List[str]=0.1 , attn_pdrop : List[Any]=0.1 , layer_norm_epsilon : List[Any]=1E-5 , initializer_range : List[str]=0.0_2 , scale_attn_weights : Optional[Any]=True , use_cache : str=True , bos_token_id : Union[str, Any]=5_02_56 , eos_token_id : Any=5_02_56 , scale_attn_by_inverse_layer_idx : Optional[int]=False , reorder_and_upcast_attn : Tuple=False , **kwargs : Dict , ) ->str:
        '''simple docstring'''
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
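# Usage sketch (not part of the class above; assumes the public `transformers`
# API and illustrative sizes): `attribute_map` lets GPT-2 style attribute names
# resolve to the fields defined here.
if __name__ == "__main__":
    from transformers import DecisionTransformerConfig
    cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
    assert cfg.num_attention_heads == cfg.n_head  # aliased via `attribute_map`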
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__( self : Any , image_processor : Tuple , tokenizer : int ) ->Optional[int]:
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self : Dict , images : ImageInput = None , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_token_type_ids : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs : Tuple , ) ->BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text." )
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images , return_tensors=return_tensors )
        if text is not None:
            text_encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_token_type_ids=return_token_type_ids , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
        return encoding_image_processor
    def batch_decode( self : List[Any] , *args : Optional[int] , **kwargs : Dict ) ->Optional[Any]:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self : int , *args : Dict , **kwargs : str ) ->Optional[int]:
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names( self : Any ) ->Optional[int]:
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
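# The `model_input_names` property above de-duplicates while preserving order via
# `dict.fromkeys`; a standalone illustration with hypothetical name lists:
tok_names_demo = ["input_ids", "attention_mask"]
img_names_demo = ["pixel_values", "attention_mask"]
assert list(dict.fromkeys(tok_names_demo + img_names_demo)) == ["input_ids", "attention_mask", "pixel_values"]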
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args ):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/" )
    target_model_path = args.target_model_path
    print(F"""Load fine-pruned model from {model_name_or_path}""" )
    model = torch.load(os.path.join(model_name_or_path , "pytorch_model.bin" ) )
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""" )
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor , threshold=threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores , threshold )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores , threshold , True )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l , r = -0.1, 1.1
                s = torch.sigmoid(scores )
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0 , max=1.0 )
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""" )
            else:
                raise ValueError("Unknown pruning method" )
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path ) , F"""bertarized_{os.path.basename(model_name_or_path )}""" )
    if not os.path.isdir(target_model_path ):
        shutil.copytree(model_name_or_path , target_model_path )
        print(F"""\nCreated folder {target_model_path}""" )
    torch.save(pruned_model , os.path.join(target_model_path , "pytorch_model.bin" ) )
print("\nPruned model saved! See you later!" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowerCamelCase__ = parser.parse_args()
main(args)
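# Illustration only (plain PyTorch, standing in for the emmental `TopKBinarizer`
# used above): the "topK" branch keeps the weights whose importance scores fall in
# the top `threshold` fraction and zeroes the rest.
def topk_mask_sketch(scores, threshold):
    k = max(1, int(threshold * scores.numel()))
    cutoff = torch.topk(scores.flatten(), k).values.min()
    return (scores >= cutoff).to(scores.dtype)

# topk_mask_sketch(torch.tensor([[0.9, 0.1], [0.5, 0.3]]), 0.5)
# -> tensor([[1., 0.], [1., 0.]])  (top half of the scores kept)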
'''simple docstring'''
def compute_ap(l ):  # noqa: E741
    n = len(l )
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
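# A brute-force cross-check (illustration only): a vertex is an articulation
# point iff removing it increases the number of connected components. For the
# adjacency list above this prints [2, 3, 5], matching compute_ap's output.
def n_components_sketch(adj, removed):
    seen, count = set(), 0
    for start in adj:
        if start in seen or start == removed:
            continue
        count += 1
        stack = [start]
        while stack:
            v = stack.pop()
            if v in seen or v == removed:
                continue
            seen.add(v)
            stack.extend(w for w in adj[v] if w != removed)
    return count

print([v for v in data if n_components_sketch(data, v) > n_components_sketch(data, None)])  # [2, 3, 5]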
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin( TFGenerationMixin ):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead." , FutureWarning , )
'''simple docstring'''
def solution():
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
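# Equivalent constant-memory sketch: only the last ten digits matter, so the sum
# can be taken modulo 10**10 with three-argument pow, avoiding the huge integers.
def solution_mod():
    mod = 10**10
    return str(sum(pow(i, i, mod) for i in range(1, 1_001)) % mod).zfill(10)

# solution_mod() should agree with solution() above.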
'''simple docstring'''
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
'<': operator.lt,
'<=': operator.le,
'==': operator.eq,
'!=': operator.ne,
'>=': operator.ge,
'>': operator.gt,
}
def _compare_versions(op , got_ver , want_ver , requirement , pkg , hint ):
    if got_ver is None or want_ver is None:
        raise ValueError(
            F"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
            F""" reinstalling {pkg}.""" )
    if not ops[op](version.parse(got_ver ) , version.parse(want_ver ) ):
        raise ImportError(
            F"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def require_version(requirement , hint = None ):
    hint = F"""\n{hint}""" if hint is not None else ""
    # non-versioned check
    if re.match(R"^[\w_\-\d]+$" , requirement ):
        pkg , op , want_ver = requirement, None, None
    else:
        match = re.findall(R"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , requirement )
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                F""" got {requirement}""" )
        pkg , want_full = match[0]
        want_range = want_full.split("," )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R"^([\s!=<>]{1,2})(.+)" , w )
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    F""" but got {requirement}""" )
            op , want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
    # special case
    if pkg == "python":
        got_ver = ".".join([str(x ) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
        return
    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg )
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op , got_ver , want_ver , requirement , pkg , hint )
def require_version_core(requirement ):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement , hint )
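# Usage sketch (illustrative requirement strings): a bare package name only checks
# that the distribution is installed; an operator suffix also checks the version,
# and comma-separated constraints must all hold.
if __name__ == "__main__":
    require_version("numpy")              # passes for any installed numpy
    require_version("numpy>=1.0,<9999")  # multiple constraints, all enforced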
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
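    # A self-contained nearest-neighbour check (plain NumPy, mirroring the linear
    # scan above): for each query row, pick the dataset row at minimal distance.
    demo_data = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    demo_query = np.array([[0.9, 1.2]])
    demo_dists = np.linalg.norm(demo_data[None, :, :] - demo_query[:, None, :], axis=2)
    print(demo_data[demo_dists.argmin(axis=1)])  # [[1. 1.]] -- closest row per query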
'''simple docstring'''
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings( ModelMixin , ConfigMixin ):
@register_to_config
    def __init__( self : str , learnable : bool , hidden_size : Optional[int] = None , length : Optional[int] = None ) ->Optional[int]:
        '''simple docstring'''
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length , hidden_size )
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings )
class VQDiffusionPipeline( DiffusionPipeline ):
    vqvae : VQModel
    text_encoder : CLIPTextModel
    tokenizer : CLIPTokenizer
    transformer : Transformer2DModel
    learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings
    scheduler : VQDiffusionScheduler
    def __init__( self : int , vqvae : VQModel , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , transformer : Transformer2DModel , scheduler : VQDiffusionScheduler , learned_classifier_free_sampling_embeddings : LearnedClassifierFreeSamplingEmbeddings , ) ->Dict:
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            vqvae=vqvae , transformer=transformer , text_encoder=text_encoder , tokenizer=tokenizer , scheduler=scheduler , learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings , )
    def _encode_prompt( self : Optional[Any] , prompt : Union[str, Any] , num_images_per_prompt : Optional[int] , do_classifier_free_guidance : List[Any] ) ->int:
        '''simple docstring'''
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size , 1 , 1 )
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens , padding="max_length" , max_length=max_length , truncation=True , return_tensors="pt" , )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=True )
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt , 1 )
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len , -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
@torch.no_grad()
    def __call__( self : Tuple , prompt : Union[str, List[str]] , num_inference_steps : int = 1_00 , guidance_scale : float = 5.0 , truncation_rate : float = 1.0 , num_images_per_prompt : int = 1 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , ) ->Union[ImagePipelineOutput, Tuple]:
        '''simple docstring'''
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(prompt )}""" )
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt , num_images_per_prompt , do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int ) or callback_steps <= 0)
        ):
            raise ValueError(
                F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                F""" {type(callback_steps )}.""" )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape , mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    F""" {self.transformer.num_vector_embeds - 1} (inclusive).""" )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input , encoder_hidden_states=prompt_embeds , timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output , dim=1 , keepdim=True )
            model_output = self.truncate(model_output , truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output , timestep=t , sample=sample , generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , t , sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample , shape=embeddings_shape )
        image = self.vqvae.decode(embeddings , force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
    def truncate( self : Optional[Any] , log_p_x_0 : torch.FloatTensor , truncation_rate : float ) ->torch.FloatTensor:
        '''simple docstring'''
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0 , 1 , descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :] , True )
        keep_mask = torch.cat((all_true, keep_mask) , dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        keep_mask = keep_mask.gather(1 , indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
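# A 1-D toy of the `truncate` logic above (batch and latent dims collapsed; reuses
# this file's `torch` import): probabilities are sorted, everything past cumulative
# mass `truncation_rate` is zeroed out in log space, and the shifted mask
# guarantees the top-1 entry is always kept.
if __name__ == "__main__":
    log_p = torch.log(torch.tensor([0.5, 0.3, 0.15, 0.05]))
    sorted_log_p, idx = torch.sort(log_p, descending=True)
    keep = torch.exp(sorted_log_p).cumsum(0) < 0.9             # truncation_rate = 0.9
    keep = torch.cat([torch.tensor([True]), keep[:-1]])        # shift right: keep top-1
    keep = keep.gather(0, idx.argsort(0))                      # back to original order
    print(torch.where(keep, log_p, torch.tensor(float("-inf"))))  # last entry -> -inf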
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
    def setUp( self : Any ) ->Any:
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def tearDown( self : Union[str, Any] ) ->str:
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self : str , comment : List[str] , class_name : Optional[Any] , class_code : Union[str, Any] , overwrite_result : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')
modified_files = (
    subprocess.check_output(F'''git diff --diff-filter=d --name-only {fork_point_sha}'''.split()).decode('utf-8').split()
)
joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rF'''^({joined_dirs}).*?\.py$''')
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
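# Invocation sketch (the script file name here is hypothetical):
# `python fetch_modified_files.py tests examples` prints the .py files changed
# since the merge-base with main that live under tests/ or examples/.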
'''simple docstring'''
from math import factorial
class Dual :
    def __init__( self : int , real : Union[str, Any] , rank : str ) ->Dict:
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__( self : str ) ->List[str]:
        '''simple docstring'''
        return (
            F"""{self.real}+"""
            F"""{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
    def reduce( self : Dict ) ->Tuple:
        '''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self : Dict , other : List[Any] ) ->Any:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self : List[Any] , other : Union[str, Any] ) ->Dict:
        '''simple docstring'''
        return self + other * -1
    def __mul__( self : List[str] , other : Optional[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self : Optional[Any] , other : List[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self : str , other : str ) ->List[str]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self : Tuple , n : Optional[Any] ) ->Optional[int]:
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate(func , position , order ):
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y ):
        return y**2 * y**4
    print(differentiate(f, 9, 2))
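    # A quick sanity check of the dual-number machinery above: for f(x) = x**3
    # the first derivative at x = 2 is 3 * 2**2 = 12.
    print(differentiate(lambda x: x**3, 2, 1))  # 12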
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {'configuration_mbart': ['MBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MBartConfig', 'MBartOnnxConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['MBartTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['MBartTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MBART_PRETRAINED_MODEL_ARCHIVE_LIST',
'MBartForCausalLM',
'MBartForConditionalGeneration',
'MBartForQuestionAnswering',
'MBartForSequenceClassification',
'MBartModel',
'MBartPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TFMBartForConditionalGeneration',
'TFMBartModel',
'TFMBartPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'FlaxMBartForConditionalGeneration',
'FlaxMBartForQuestionAnswering',
'FlaxMBartForSequenceClassification',
'FlaxMBartModel',
'FlaxMBartPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class WavaVecaFeatureExtractionTester( unittest.TestCase ):
    def __init__( self : Optional[Any] , parent : Dict , batch_size : Any=7 , min_seq_length : int=4_00 , max_seq_length : Tuple=20_00 , feature_size : Optional[int]=1 , padding_value : str=0.0 , sampling_rate : Union[str, Any]=1_60_00 , return_attention_mask : str=True , do_normalize : int=True , ) ->Dict:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self : List[Any] ) ->List[Any]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self : Union[str, Any] , equal_length : Optional[int]=False , numpify : Optional[Any]=False ) ->Optional[int]:
        '''simple docstring'''
        def _flatten(list_of_lists : int ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length) )
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size) ) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
class WavaVecaFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    feature_extraction_class = WavaVecaFeatureExtractor
    def setUp( self : Optional[Any] ) ->Dict:
        '''simple docstring'''
        self.feat_extract_tester = WavaVecaFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self : Optional[int] , input_vector : List[Any] ) ->Tuple:
        '''simple docstring'''
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1E-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1E-3 ) )
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Optional[Any] = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
_UpperCAmelCase : str = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
_UpperCAmelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test batched
_UpperCAmelCase : Dict = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
_UpperCAmelCase : Optional[Any] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_UpperCAmelCase : List[Any] = np.asarray(lowerCamelCase__ )
_UpperCAmelCase : List[str] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
_UpperCAmelCase : List[Any] = feat_extract(lowerCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Optional[int] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : int = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Any = feat_extract(lowerCamelCase__ , padding=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self.assertTrue(input_values[0][8_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self.assertTrue(input_values[0][10_00:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : int = range(8_00 , 14_00 , 2_00 )
_UpperCAmelCase : Optional[int] = [floats_list((1, x) )[0] for x in lengths]
_UpperCAmelCase : List[str] = ["longest", "max_length", "do_not_pad"]
_UpperCAmelCase : List[Any] = [None, 16_00, None]
for max_length, padding in zip(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = feat_extract(lowerCamelCase__ , max_length=lowerCamelCase__ , padding=lowerCamelCase__ )
_UpperCAmelCase : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:8_00] )
self._check_zero_mean_unit_variance(input_values[1][:10_00] )
self._check_zero_mean_unit_variance(input_values[2][:12_00] )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : List[Any] = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10_00 , padding="max_length" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : Tuple = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : List[str] = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=10_00 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 10_00) )
_UpperCAmelCase : int = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Dict = feat_extract(
lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=20_00 , padding="longest" , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :8_00] )
self._check_zero_mean_unit_variance(input_values[1, :10_00] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 12_00) )
@require_torch
    def test_double_precision_pad( self : Optional[Any] ) ->Optional[Any]:
        '''simple docstring'''
        import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(1_00 ).astype(np.float64 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
            self.assertTrue(np_processed.input_values.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
            self.assertTrue(pt_processed.input_values.dtype == torch.float32 )
@slow
@require_torch
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
_UpperCAmelCase : List[Any] = WavaVecaConfig.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase__ )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == "layer" )
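    # Illustration only (not an original test): the zero-mean/unit-variance
    # property asserted by `_check_zero_mean_unit_variance` above, shown standalone
    # as per-utterance standardization of a random signal.
    def test_standalone_normalization_sketch(self):
        speech = np.random.rand(800)
        normed = (speech - speech.mean()) / np.sqrt(speech.var() + 1e-7)
        self.assertTrue(abs(normed.mean()) < 1e-3)
        self.assertTrue(abs(normed.var() - 1) < 1e-3)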
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester( unittest.TestCase ):
    def __init__( self : int , parent : str , batch_size : str=13 , seq_length : Dict=7 , is_training : str=True , use_attention_mask : List[str]=True , use_token_type_ids : Dict=True , use_labels : int=True , vocab_size : Tuple=99 , hidden_size : Optional[int]=32 , num_hidden_layers : str=5 , num_attention_heads : List[Any]=4 , intermediate_size : Any=37 , hidden_act : List[Any]="gelu" , hidden_dropout_prob : int=0.1 , attention_probs_dropout_prob : Tuple=0.1 , max_position_embeddings : Optional[int]=5_12 , type_vocab_size : Any=16 , type_sequence_label_size : Optional[Any]=2 , initializer_range : Optional[Any]=0.0_2 , num_choices : Optional[int]=4 , ) ->List[str]:
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
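
# Usage sketch for the checkpoint exercised above. Assumptions: flax is
# installed and the PyTorch weights convert on the fly via from_pt=True.
import numpy as np
from transformers import FlaxRobertaPreLayerNormModel

model = FlaxRobertaPreLayerNormModel.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True
)
outputs = model(np.ones((1, 11), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 11, hidden_size)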
'''simple docstring'''
from itertools import count
def __lowerCAmelCase (__lowerCAmelCase = 50 ):
_UpperCAmelCase : int = [1] * min_block_length
for n in count(__lowerCAmelCase ):
fill_count_functions.append(1 )
for block_length in range(__lowerCAmelCase , n + 1 ):
for block_start in range(n - block_length ):
fill_count_functions[n] += fill_count_functions[
n - block_start - block_length - 1
]
fill_count_functions[n] += 1
if fill_count_functions[n] > 1_000_000:
break
return n
if __name__ == "__main__":
print(F'''{solution() = }''')
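
# De-obfuscated sketch of the recurrence above (a Project Euler 115-style
# fill count; the variable names are illustrative, not from the source).
from itertools import count

def fill_count_solution(min_block_length: int = 50) -> int:
    # fill_count[n] = ways to fill a row of length n with blocks of at
    # least min_block_length, separated by at least one empty square
    fill_count = [1] * min_block_length  # rows too short for a block stay empty
    for n in count(min_block_length):
        fill_count.append(1)  # the all-empty configuration
        for block_length in range(min_block_length, n + 1):
            # place the first block at block_start, one separator square after it
            for block_start in range(n - block_length):
                fill_count[n] += fill_count[n - block_start - block_length - 1]
            fill_count[n] += 1  # block flush against the right edge
        if fill_count[n] > 1_000_000:
            return n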
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
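
# Minimal sketch of the lazy-import pattern the __init__ above relies on
# (illustrative names; the real implementation lives in transformers.utils).
import importlib
from types import ModuleType

class LazyModule(ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # import the submodule only when one of its attributes is first touched
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)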
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = len(__lowerCAmelCase )
for _ in range(__lowerCAmelCase ):
for i in range(_ % 2 , arr_size - 1 , 2 ):
if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
return arr
if __name__ == "__main__":
lowerCamelCase__ = list(range(10, 0, -1))
print(F'''Original: {arr}. Sorted: {odd_even_transposition(arr)}''')
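
# De-obfuscated sketch of the routine above (odd-even transposition, also
# known as brick sort; the names are illustrative).
def odd_even_transposition(arr: list) -> list:
    n = len(arr)
    for pass_index in range(n):
        # alternate between (even, odd) and (odd, even) adjacent pairs
        for i in range(pass_index % 2, n - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr

assert odd_even_transposition(list(range(10, 0, -1))) == list(range(1, 11))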
'''simple docstring'''
import os
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , "num.txt" )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
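
# De-obfuscated sketch of the helper above. Assumption: a sibling num.txt
# holding one integer per line, as in Project Euler problem 13.
import os

def first_ten_digits_of_sum() -> str:
    path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]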
'''simple docstring'''
from timeit import timeit
lowerCamelCase__ = {
'MALAYALAM': True,
'String': False,
'rotor': True,
'level': True,
'A': True,
'BB': True,
'ABC': False,
'amanaplanacanalpanama': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = len(__lowerCAmelCase ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = len(__lowerCAmelCase ) // 2
_UpperCAmelCase : Optional[Any] = len(__lowerCAmelCase )
    # We only need to traverse the first half of the string, since the
    # i-th character from the start mirrors the character at index
    # n - i - 1 from the end.
    # e.g. in [0,1,2,3,4,5] (n == 6), index 4 pairs with index 1.
return all(s[i] == s[n - i - 1] for i in range(__lowerCAmelCase ) )
def __lowerCAmelCase (__lowerCAmelCase ):
if len(__lowerCAmelCase ) <= 2:
return True
if s[0] == s[len(__lowerCAmelCase ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def __lowerCAmelCase (__lowerCAmelCase ):
return s == s[::-1]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = F"""all({name}(key) is value for key, value in test_data.items())"""
_UpperCAmelCase : List[Any] = F"""from __main__ import test_data, {name}"""
_UpperCAmelCase : Optional[Any] = 500_000
_UpperCAmelCase : Dict = timeit(stmt=__lowerCAmelCase , setup=__lowerCAmelCase , number=__lowerCAmelCase )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F'''{key:21} {value}''')
print('a man a plan a canal panama')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('is_palindrome_slice')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('is_palindrome')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('is_palindrome_recursive')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('is_palindrome_traversal')
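
# Minimal sketch of how one of these benchmarks is wired up. Assumption:
# test_data and the function are importable from __main__, as above.
from timeit import timeit

def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]

runs = 500_000
seconds = timeit(
    stmt="all(is_palindrome_slice(key) is value for key, value in test_data.items())",
    setup="from __main__ import test_data, is_palindrome_slice",
    number=runs,
)
print(f"is_palindrome_slice finished {runs:,} runs in {seconds:.5f} seconds")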
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
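
# De-obfuscated sketch of the post-processing step above: truncate a
# generated completion at the first end-of-function marker (names are mine).
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(code: str) -> str:
    # re.split with a capture group keeps the delimiters; dropping the last
    # delimiter and the trailing fragment removes the dangling extra block
    pieces = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    return "".join(pieces[:-2])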
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = BlenderbotSmallTokenizer
lowerCAmelCase : List[Any] = False
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : List[Any] = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
_UpperCAmelCase : Dict = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
_UpperCAmelCase : Tuple = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
_UpperCAmelCase : Optional[int] = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
_UpperCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(lowerCamelCase__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : Tuple , **lowerCamelCase__ : List[Any] ) ->List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = "adapt act apte"
_UpperCAmelCase : Tuple = "adapt act apte"
return input_text, output_text
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : str = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_UpperCAmelCase : Optional[Any] = "adapt act apte"
_UpperCAmelCase : Union[str, Any] = ["adapt", "act", "ap@@", "te"]
_UpperCAmelCase : Optional[Any] = tokenizer.tokenize(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Dict = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
_UpperCAmelCase : int = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCamelCase__ ) , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [13_84]
_UpperCAmelCase : List[Any] = "I am a small frog."
_UpperCAmelCase : List[Any] = tok([src_text] , padding=lowerCamelCase__ , truncation=lowerCamelCase__ )["input_ids"]
_UpperCAmelCase : List[Any] = tok.batch_decode(lowerCamelCase__ , skip_special_tokens=lowerCamelCase__ , clean_up_tokenization_spaces=lowerCamelCase__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
_UpperCAmelCase : Tuple = "I am a small frog ."
_UpperCAmelCase : int = "."
_UpperCAmelCase : Optional[int] = tok(lowerCamelCase__ )["input_ids"]
_UpperCAmelCase : int = tok(lowerCamelCase__ )["input_ids"]
assert encoded[-1] == encoded_dot[0]
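
# Round-trip usage sketch for the checkpoint tested above. Assumption:
# network access to download facebook/blenderbot-90M.
from transformers import BlenderbotSmallTokenizer

tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
ids = tok("I am a small frog.").input_ids
print(tok.decode(ids, skip_special_tokens=True))  # "i am a small frog ." (lowercased, re-spaced)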
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar('T')
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : int = data
_UpperCAmelCase : str = self
_UpperCAmelCase : str = 0
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : Any ) ->None:
'''simple docstring'''
_UpperCAmelCase : dict[T, DisjointSetTreeNode[T]] = {}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = DisjointSetTreeNode(lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : T ) ->DisjointSetTreeNode[T]:
'''simple docstring'''
_UpperCAmelCase : Any = self.map[data]
if elem_ref != elem_ref.parent:
_UpperCAmelCase : List[str] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : DisjointSetTreeNode[T] , lowerCamelCase__ : DisjointSetTreeNode[T] ) ->None:
'''simple docstring'''
if nodea.rank > nodea.rank:
_UpperCAmelCase : Dict = nodea
else:
_UpperCAmelCase : Union[str, Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : T , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
self.link(self.find_set(lowerCamelCase__ ) , self.find_set(lowerCamelCase__ ) )
class lowerCAmelCase__ ( Generic[T] ):
def __init__( self : int ) ->None:
'''simple docstring'''
_UpperCAmelCase : dict[T, dict[T, int]] = {}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : T ) ->None:
'''simple docstring'''
if node not in self.connections:
_UpperCAmelCase : List[str] = {}
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : T , lowerCamelCase__ : T , lowerCamelCase__ : int ) ->None:
'''simple docstring'''
self.add_node(lowerCamelCase__ )
self.add_node(lowerCamelCase__ )
_UpperCAmelCase : Dict = weight
_UpperCAmelCase : str = weight
def lowerCAmelCase__ ( self : List[Any] ) ->GraphUndirectedWeighted[T]:
'''simple docstring'''
_UpperCAmelCase : str = []
_UpperCAmelCase : Any = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
        edges.sort(key=lambda x : x[2] )
# creating the disjoint set
_UpperCAmelCase : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCamelCase__ )
# MST generation
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Tuple = 0
_UpperCAmelCase : str = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
_UpperCAmelCase : Union[str, Any] = edges[index]
index += 1
_UpperCAmelCase : Union[str, Any] = disjoint_set.find_set(lowerCamelCase__ )
_UpperCAmelCase : int = disjoint_set.find_set(lowerCamelCase__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
disjoint_set.union(lowerCamelCase__ , lowerCamelCase__ )
return graph
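
# Self-contained sketch of the same Kruskal idea with plain arrays
# (union-find with path halving; nodes 0..n-1, edges as (u, v, weight)).
def kruskal_mst(n: int, edges: list) -> list:
    parent = list(range(n))

    def find(x: int) -> int:
        while parent[x] != x:
            parent[x] = parent[parent[x]]  # path halving
            x = parent[x]
        return x

    mst = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # the edge joins two components: keep it
            parent[root_u] = root_v
            mst.append((u, v, w))
    return mst

assert kruskal_mst(3, [(0, 1, 1), (1, 2, 2), (0, 2, 10)]) == [(0, 1, 1), (1, 2, 2)]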
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
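
# Usage sketch: hf_hub_url builds the URL locally, so no network is needed.
# The repo id and filename here are illustrative.
import huggingface_hub as hfh

url = hfh.hf_hub_url("squad", "README.md", repo_type="dataset")
print(url)  # e.g. https://huggingface.co/datasets/squad/resolve/main/README.md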
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
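
# Usage sketch. Assumptions: timm is installed, and TimmBackbone /
# TimmBackboneConfig are importable from the transformers top level;
# use_pretrained_backbone=False avoids downloading weights.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
backbone = TimmBackbone(config)
outputs = backbone(torch.randn(1, 3, 224, 224))
print([f.shape for f in outputs.feature_maps])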
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = IFPipeline
lowerCAmelCase : Optional[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
lowerCAmelCase : str = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase : str = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=0 ) ->Any:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Tuple = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self._test_save_load_local()
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa )
_UpperCAmelCase : Tuple = IFSuperResolutionPipeline.from_pretrained(
"DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=lowerCamelCase__ , tokenizer=lowerCamelCase__ )
        # precompute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("cuda" )
_UpperCAmelCase : Tuple = pipe_a.encode_prompt("anime turtle" , device="cuda" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
_UpperCAmelCase : Any = None
_UpperCAmelCase : Optional[int] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
_UpperCAmelCase : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
_UpperCAmelCase : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
_UpperCAmelCase : Any = IFInpaintingPipeline(**pipe_a.components )
_UpperCAmelCase : List[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 13 * 10**9
_UpperCAmelCase : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : str = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Tuple = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : Dict = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , original_image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : List[Any] = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : Dict = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : Dict , lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
_start_torch_memory_measurement()
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(1 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : Any = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , num_inference_steps=2 , generator=lowerCamelCase__ , output_type="np" , )
_UpperCAmelCase : Optional[int] = output.images[0]
assert image.shape == (64, 64, 3)
_UpperCAmelCase : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 10 * 10**9
_UpperCAmelCase : str = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
# pipeline 2
_start_torch_memory_measurement()
_UpperCAmelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
_UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(0 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 2_56, 2_56) , rng=random.Random(1 ) ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = pipe_a(
prompt_embeds=lowerCamelCase__ , negative_prompt_embeds=lowerCamelCase__ , image=lowerCamelCase__ , mask_image=lowerCamelCase__ , original_image=lowerCamelCase__ , generator=lowerCamelCase__ , num_inference_steps=2 , output_type="np" , )
_UpperCAmelCase : Any = output.images[0]
assert image.shape == (2_56, 2_56, 3)
_UpperCAmelCase : Optional[int] = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 10**9
_UpperCAmelCase : List[str] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" )
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
def __lowerCAmelCase ():
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
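
# Text-to-image sketch for the stage-1 pipeline exercised above.
# Assumptions: a CUDA GPU and access to the gated DeepFloyd weights.
import torch
from diffusers import IFPipeline

pipe = IFPipeline.from_pretrained(
    "DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()
image = pipe("anime turtle", num_inference_steps=50).images[0]
image.save("turtle_stage_1.png")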
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import math
import os
import sys
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : str = ""
try:
with open(__lowerCAmelCase , "rb" ) as binary_file:
_UpperCAmelCase : Tuple = binary_file.read()
for dat in data:
_UpperCAmelCase : str = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print("File not accessible" )
sys.exit()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
lexicon.pop(__lowerCAmelCase )
_UpperCAmelCase : str = last_match_id
if math.loga(__lowerCAmelCase ).is_integer():
for curr_key in lexicon:
_UpperCAmelCase : List[str] = "0" + lexicon[curr_key]
_UpperCAmelCase : str = bin(__lowerCAmelCase )[2:]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = {"0": "0", "1": "1"}
_UpperCAmelCase : Optional[int] = "", ""
_UpperCAmelCase : int = len(__lowerCAmelCase )
for i in range(len(__lowerCAmelCase ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase : Tuple = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
index += 1
_UpperCAmelCase : Any = ""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_UpperCAmelCase : List[str] = lexicon[curr_string]
result += last_match_id
return result
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = os.path.getsize(__lowerCAmelCase )
_UpperCAmelCase : List[str] = bin(__lowerCAmelCase )[2:]
_UpperCAmelCase : Dict = len(__lowerCAmelCase )
return "0" * (length_length - 1) + file_length_binary + compressed
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = 8
try:
with open(__lowerCAmelCase , "wb" ) as opened_file:
_UpperCAmelCase : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(__lowerCAmelCase ) , __lowerCAmelCase )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("10000000" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__lowerCAmelCase , 2 ).to_bytes(1 , byteorder="big" ) )
except OSError:
print("File not accessible" )
sys.exit()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = read_file_binary(__lowerCAmelCase )
_UpperCAmelCase : Tuple = compress_data(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = add_file_length(__lowerCAmelCase , __lowerCAmelCase )
write_file_binary(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
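
# De-obfuscated sketch of the first helper above: read a whole file into
# one long bit string (names are mine).
def read_file_binary(file_path: str) -> str:
    with open(file_path, "rb") as binary_file:
        return "".join(f"{byte:08b}" for byte in binary_file.read())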
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
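# --- Illustrative usage (editor's addition) ---
# A minimal sketch of running the checkpoint the slow tests above exercise.
# Assumes Hub access; the model ID and fixture path mirror the test constants.
# Kept commented out so this test module stays import-safe:
#
# from PIL import Image
# import torch
# from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor
#
# processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
# model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance").eval()
# image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# inputs = processor(image, return_tensors="pt")
# with torch.no_grad():
#     outputs = model(**inputs)
# # class logits: (1, num_queries, num_labels + 1); mask logits at 1/4 resolution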
| 40
| 0
|
'''simple docstring'''
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
lowerCAmelCase : Any = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_UpperCAmelCase : List[Any] = VideoClassificationPipeline(model=lowerCamelCase__ , image_processor=lowerCamelCase__ , top_k=2 )
_UpperCAmelCase : Any = [
example_video_filepath,
"https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
]
return video_classifier, examples
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple ) ->List[Any]:
'''simple docstring'''
for example in examples:
_UpperCAmelCase : Optional[int] = video_classifier(lowerCamelCase__ )
self.assertEqual(
lowerCamelCase__ , [
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
{"score": ANY(lowerCamelCase__ ), "label": ANY(lowerCamelCase__ )},
] , )
@require_torch
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
_UpperCAmelCase : List[str] = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
_UpperCAmelCase : List[Any] = pipeline(
"video-classification" , model=lowerCamelCase__ , feature_extractor=lowerCamelCase__ , frame_sampling_rate=4 )
_UpperCAmelCase : List[str] = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
_UpperCAmelCase : Union[str, Any] = video_classifier(lowerCamelCase__ , top_k=2 )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}] , )
_UpperCAmelCase : Any = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowerCamelCase__ , decimals=4 ) , [
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
] , )
@require_tf
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
pass
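# --- Illustrative usage (editor's addition) ---
# The pipeline call pattern the tests above exercise, as a standalone snippet.
# The tiny model ID and demo video mirror the constants used in the tests.
# Kept commented out so this test module stays import-safe:
#
# from huggingface_hub import hf_hub_download
# from transformers import pipeline
#
# video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
# classifier = pipeline("video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification")
# print(classifier(video_path, top_k=2))  # -> [{"score": ..., "label": ...}, {...}]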
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (__lowerCAmelCase ):
return int(x / 2**20 )
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
# We also need to keep track of the stating epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
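# --- Illustrative sketch (editor's addition) ---
# A readable stand-alone version of the CUDA peak-memory context manager the
# script defines above with obfuscated names (bamb converts bytes to MiB).
class PeakMemoryTracker:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # zero the peak gauge
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.used = int((torch.cuda.memory_allocated() - self.begin) / 2**20)
        self.peaked = int((torch.cuda.max_memory_allocated() - self.begin) / 2**20)
# Usage (inside a training loop):
#     with PeakMemoryTracker() as tm:
#         ...  # one epoch of training
#     print(tm.used, tm.peaked)  # MiB consumed / peak MiB during the block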
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
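# --- Illustrative sketch (editor's addition) ---
# The _LazyModule pattern above defers heavy imports until first attribute
# access. A minimal stand-alone analogue using module-level __getattr__
# (PEP 562); the names below are illustrative, not the transformers internals:
#
# import importlib
#
# _import_structure = {"math": ["sqrt"], "json": ["dumps"]}
#
# def __getattr__(name):
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             return getattr(importlib.import_module(module_name), name)
#     raise AttributeError(name)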
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
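# --- Illustrative check (editor's addition) ---
# How the renaming rules above map raw RWKV keys, traced on a toy example:
#   emb.weight              -> rwkv.embeddings.weight
#   blocks.0.ln0.weight     -> rwkv.blocks.0.pre_ln.weight
#   blocks.3.att.time_mix_k -> rwkv.blocks.3.attention.time_mix_key
#   blocks.3.ffn.key.weight -> rwkv.blocks.3.feed_forward.key.weight
#   head.weight             -> head.weight  (the only key left unprefixed)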
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
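# --- Illustrative CLI usage (editor's addition) ---
# A hypothetical invocation; the repo and checkpoint file names below are
# placeholders and have not been verified against the Hub:
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-430m \
#       --checkpoint_file RWKV-4-Pile-430M-20220808-8066.pth \
#       --output_dir ./rwkv-430m-hf \
#       --size 430M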
| 40
| 0
|
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"""
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=True ):
model.train()
_UpperCAmelCase : int = model(__lowerCAmelCase )
_UpperCAmelCase : str = F.mse_loss(__lowerCAmelCase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ):
set_seed(42 )
_UpperCAmelCase : Any = RegressionModel()
_UpperCAmelCase : Union[str, Any] = deepcopy(__lowerCAmelCase )
_UpperCAmelCase : int = RegressionDataset(length=80 )
_UpperCAmelCase : Optional[Any] = DataLoader(__lowerCAmelCase , batch_size=16 )
model.to(accelerator.device )
if sched:
_UpperCAmelCase : int = AdamW(params=model.parameters() , lr=1e-3 )
_UpperCAmelCase : Optional[Any] = AdamW(params=ddp_model.parameters() , lr=1e-3 )
_UpperCAmelCase : List[Any] = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.6_5 )
_UpperCAmelCase : List[str] = LambdaLR(__lowerCAmelCase , lr_lambda=lambda __lowerCAmelCase : epoch**0.6_5 )
# Make a copy of `model`
if sched:
_UpperCAmelCase : List[str] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
_UpperCAmelCase : Tuple = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def __lowerCAmelCase (__lowerCAmelCase ):
# Test when on a single CPU or GPU that the context manager does nothing
_UpperCAmelCase : List[str] = get_training_setup(__lowerCAmelCase )
# Use a single batch
_UpperCAmelCase : List[Any] = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_UpperCAmelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
_UpperCAmelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCAmelCase : Any = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def __lowerCAmelCase (__lowerCAmelCase ):
# Test on distributed setup that context manager behaves properly
_UpperCAmelCase : Optional[int] = get_training_setup(__lowerCAmelCase )
# Use a single batch
_UpperCAmelCase : Tuple = next(iter(__lowerCAmelCase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_UpperCAmelCase : int = accelerator.gather((ddp_input, ddp_target) )
_UpperCAmelCase : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
# Sync grads
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCAmelCase : Union[str, Any] = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
def __lowerCAmelCase (__lowerCAmelCase=False , __lowerCAmelCase=False ):
_UpperCAmelCase : str = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_UpperCAmelCase : Optional[Any] = get_training_setup(__lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Any = batch.values()
# Gather the distributed inputs and targs for the base model
_UpperCAmelCase : List[str] = accelerator.gather((ddp_input, ddp_target) )
_UpperCAmelCase : List[Any] = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(__lowerCAmelCase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F"""Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"""
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F"""Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"""
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
_UpperCAmelCase : Optional[Any] = ddp_input[torch.randperm(len(__lowerCAmelCase ) )]
GradientState._reset_state()
def __lowerCAmelCase (__lowerCAmelCase=False , __lowerCAmelCase=False ):
_UpperCAmelCase : List[str] = Accelerator(
split_batches=__lowerCAmelCase , dispatch_batches=__lowerCAmelCase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_UpperCAmelCase : Any = get_training_setup(__lowerCAmelCase , __lowerCAmelCase )
for iteration, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = batch.values()
# Gather the distributed inputs and targs for the base model
_UpperCAmelCase : int = accelerator.gather((ddp_input, ddp_target) )
_UpperCAmelCase : Any = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(__lowerCAmelCase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(__lowerCAmelCase ):
step_model(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F"""Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"""
_UpperCAmelCase : str = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(__lowerCAmelCase ))
if accelerator.num_processes > 1:
check_model_parameters(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1_337 + iteration )
GradientState._reset_state()
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[Any] = Accelerator()
_UpperCAmelCase : int = RegressionDataset(length=80 )
_UpperCAmelCase : List[Any] = DataLoader(__lowerCAmelCase , batch_size=16 )
_UpperCAmelCase : Dict = RegressionDataset(length=96 )
_UpperCAmelCase : List[Any] = DataLoader(__lowerCAmelCase , batch_size=16 )
_UpperCAmelCase : Union[str, Any] = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if iteration < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(__lowerCAmelCase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(__lowerCAmelCase )
if batch_num < len(__lowerCAmelCase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def __lowerCAmelCase ():
_UpperCAmelCase : int = Accelerator()
_UpperCAmelCase : Union[str, Any] = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(__lowerCAmelCase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(__lowerCAmelCase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation(__lowerCAmelCase , __lowerCAmelCase )
    # Currently will break on torch 2.0+, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F"""`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**""" , )
test_gradient_accumulation_with_opt_and_scheduler(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
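# --- Illustrative pattern (editor's addition) ---
# The core idiom these tests verify: inside accelerator.accumulate(model),
# Accelerate only synchronizes gradients (and lets the wrapped optimizer
# really step) once every gradient_accumulation_steps batches. Sketch only;
# compute_loss is a hypothetical helper and the objects are assumed prepared:
#
# accelerator = Accelerator(gradient_accumulation_steps=2)
# for batch in dataloader:
#     with accelerator.accumulate(model):
#         loss = compute_loss(model, batch)
#         accelerator.backward(loss)
#         optimizer.step()
#         optimizer.zero_grad()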
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
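# --- Illustrative note (editor's addition) ---
# The matching subgradient used in backprop, with the common convention that
# the derivative at 0 is taken to be 0:
def relu_derivative(vector: np.ndarray) -> np.ndarray:
    return np.where(vector > 0, 1.0, 0.0)
# relu_derivative(np.array([-1, 0, 5]))  # --> [0., 0., 1.]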
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = str(__lowerCAmelCase )
return len(__lowerCAmelCase ) == 9 and set(__lowerCAmelCase ) == set("123456789" )
def __lowerCAmelCase ():
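    # Editor's note: a 4-digit base b concatenated with 2*b (5 digits) equals
    # b * 100_002, and a 3-digit base b concatenated with 2*b and 3*b (3 digits
    # each) equals b * 1_002_003; the loops below scan those candidate families
    # from the largest base down and return the first pandigital hit.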
for base_num in range(9_999 , 4_999 , -1 ):
_UpperCAmelCase : List[Any] = 100_002 * base_num
if is_9_pandigital(__lowerCAmelCase ):
return candidate
for base_num in range(333 , 99 , -1 ):
_UpperCAmelCase : Dict = 1_002_003 * base_num
if is_9_pandigital(__lowerCAmelCase ):
return candidate
return None
if __name__ == "__main__":
print(F'''{solution() = }''')
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
    def get_decay(self , optimization_step: int ) ->float:
        '''Compute the decay factor for the exponential moving average.'''
        step = max(0 , optimization_step - self.update_after_step - 1 )
        if step <= 0:
            return 0.0
        if self.use_ema_warmup:
            cur_decay_value = 1 - (1 + step / self.inv_gamma) ** -self.power
        else:
            cur_decay_value = (1 + step) / (10 + step)
        cur_decay_value = min(cur_decay_value , self.decay )
        # make sure decay is not smaller than min_decay
        cur_decay_value = max(cur_decay_value , self.min_decay )
        return cur_decay_value
    @torch.no_grad()
    def step(self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        if isinstance(parameters , torch.nn.Module ):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , deprecation_message , standard_warn=False , )
            parameters = parameters.parameters()
        parameters = list(parameters )
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step )
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params , parameters ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param) )
                else:
                    s_param.copy_(param )
    def copy_to(self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        '''Copy the averaged (shadow) parameters into the given parameters.'''
        parameters = list(parameters )
        for s_param, param in zip(self.shadow_params , parameters ):
            param.data.copy_(s_param.to(param.device ).data )
    def to(self , device=None , dtype=None ) ->None:
        '''Move the internal shadow buffers to `device` (and optionally cast floats to `dtype`).'''
        self.shadow_params = [
            p.to(device=device , dtype=dtype ) if p.is_floating_point() else p.to(device=device )
            for p in self.shadow_params
        ]
    def state_dict(self ) ->dict:
        '''Return the state of the ExponentialMovingAverage as a dict.'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        '''Temporarily stash the current parameters so they can be restored later.'''
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self , parameters: Iterable[torch.nn.Parameter] ) ->None:
        '''Restore the parameters stashed with the `store` method.'''
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
        for c_param, param in zip(self.temp_stored_params , parameters ):
            param.data.copy_(c_param.data )
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self , state_dict: dict ) ->None:
        '''Load the ExponentialMovingAverage state from a dict, validating each field.'''
        state_dict = copy.deepcopy(state_dict )
        self.decay = state_dict.get("decay" , self.decay )
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1" )
        self.min_decay = state_dict.get("min_decay" , self.min_decay )
        if not isinstance(self.min_decay , float ):
            raise ValueError("Invalid min_decay" )
        self.optimization_step = state_dict.get("optimization_step" , self.optimization_step )
        if not isinstance(self.optimization_step , int ):
            raise ValueError("Invalid optimization_step" )
        self.update_after_step = state_dict.get("update_after_step" , self.update_after_step )
        if not isinstance(self.update_after_step , int ):
            raise ValueError("Invalid update_after_step" )
        self.use_ema_warmup = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
        if not isinstance(self.use_ema_warmup , bool ):
            raise ValueError("Invalid use_ema_warmup" )
        self.inv_gamma = state_dict.get("inv_gamma" , self.inv_gamma )
        if not isinstance(self.inv_gamma , (float, int) ):
            raise ValueError("Invalid inv_gamma" )
        self.power = state_dict.get("power" , self.power )
        if not isinstance(self.power , (float, int) ):
            raise ValueError("Invalid power" )
        shadow_params = state_dict.get("shadow_params" , None )
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params , list ):
                raise ValueError("shadow_params must be a list" )
            if not all(isinstance(p , torch.Tensor ) for p in self.shadow_params ):
                raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
import os
import time
import numpy as np
import onnxruntime as ort
lowerCamelCase__ = '1'
lowerCamelCase__ = '0'
lowerCamelCase__ = '1'
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('Create inference session...')
execution_provider = ['TensorrtExecutionProvider', 'CUDAExecutionProvider']
sess = ort.InferenceSession('model.onnx', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('Warm up phase...')
sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Start inference...')
start_time = time.time()
max_iters = 2_000
results = {}
for iter in range(max_iters):
    results = sess.run(
None,
{
sess.get_inputs()[0].name: input_ids,
sess.get_inputs()[1].name: attention_mask,
sess.get_inputs()[2].name: token_type_ids,
},
run_options=run_opt,
)
print('Average Inference Time = {:.3f} ms'.format((time.time() - start_time) * 1_000 / max_iters))
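# A hypothetical export step producing the model.onnx consumed above; the
# checkpoint, input names and opset version are illustrative, not from the
# original script.
# import torch
# from transformers import BertModel
#
# bert = BertModel.from_pretrained("bert-base-uncased").eval()
# ids = torch.ones(1, 128, dtype=torch.long)
# torch.onnx.export(
#     bert,
#     (ids, ids, torch.zeros(1, 128, dtype=torch.long)),
#     "model.onnx",
#     input_names=["input_ids", "attention_mask", "token_type_ids"],
#     output_names=["last_hidden_state", "pooler_output"],
#     opset_version=13,
# )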
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
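# Hypothetical follow-up: loading the extracted checkpoint into a smaller student
# model. The student class/config are illustrative; the original training script
# that consumes this checkpoint is not shown here.
# import torch
# from transformers import BertConfig, BertForMaskedLM
#
# student = BertForMaskedLM(BertConfig(num_hidden_layers=6))
# sd = torch.load("serialization_dir/tf_bert-base-uncased_0247911.pth", map_location="cpu")
# missing, unexpected = student.load_state_dict(sd, strict=False)
# print(f"missing: {len(missing)}, unexpected: {len(unexpected)}")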
'''simple docstring'''
import requests
giphy_api_key = 'YOUR API KEY'
def get_gifs(query , api_key = giphy_api_key ):
    '''Return a list of GIF URLs from the Giphy search API for `query`.'''
    formatted_query = "+".join(query.split() )
    url = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
    gifs = requests.get(url ).json()["data"]
    return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print('\n'.join(get_gifs('space ship')))
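# NOTE: a real Giphy API key must be set above for the request to succeed; with
# the placeholder value the API responds with an authentication error.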
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self , graph: dict[str, list[str]] , source_vertex: str ) ->None:
        '''Store the graph and the source vertex for the breadth-first search.'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self ) ->None:
        '''Run BFS from the source vertex and record each node's parent.'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path(self , target_vertex: str ) ->str:
        '''Return the path from the source to `target_vertex`, e.g. "G->C->A".'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
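    # The two successful lookups above print:
    #   G->C->A->B->D
    #   G
    # The final lookup raises ValueError, since "Foo" is never reached by the BFS.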
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
def __init__( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any=13 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Union[str, Any]=16 , lowerCamelCase__ : Union[str, Any]=[32, 64, 1_28] , lowerCamelCase__ : str=[1, 2, 1] , lowerCamelCase__ : Optional[Any]=[2, 2, 4] , lowerCamelCase__ : str=2 , lowerCamelCase__ : str=2.0 , lowerCamelCase__ : int=True , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : str=0.1 , lowerCamelCase__ : str="gelu" , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Any=1E-5 , lowerCamelCase__ : str=True , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Any=True , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : str=8 , lowerCamelCase__ : str=["stage1", "stage2"] , lowerCamelCase__ : str=[1, 2] , ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : int = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : Any = num_channels
_UpperCAmelCase : Optional[Any] = embed_dim
_UpperCAmelCase : Dict = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : Optional[Any] = num_heads
_UpperCAmelCase : Optional[Any] = window_size
_UpperCAmelCase : List[Any] = mlp_ratio
_UpperCAmelCase : int = qkv_bias
_UpperCAmelCase : Optional[Any] = hidden_dropout_prob
_UpperCAmelCase : Optional[int] = attention_probs_dropout_prob
_UpperCAmelCase : Tuple = drop_path_rate
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : Optional[Any] = use_absolute_embeddings
_UpperCAmelCase : Union[str, Any] = patch_norm
_UpperCAmelCase : int = layer_norm_eps
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Optional[Any] = scope
_UpperCAmelCase : str = use_labels
_UpperCAmelCase : Dict = type_sequence_label_size
_UpperCAmelCase : str = encoder_stride
_UpperCAmelCase : Tuple = out_features
_UpperCAmelCase : int = out_indices
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Tuple = FocalNetModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_UpperCAmelCase : Any = None
_UpperCAmelCase : Union[str, Any] = FocalNetBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = FocalNetForMaskedImageModeling(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase : Optional[int] = 1
_UpperCAmelCase : Tuple = FocalNetForMaskedImageModeling(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.type_sequence_label_size
_UpperCAmelCase : Any = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase : int = 1
_UpperCAmelCase : Dict = FocalNetForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.prepare_config_and_inputs()
_UpperCAmelCase : str = config_and_inputs
_UpperCAmelCase : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : Tuple = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Any = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[Any] = False
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Tuple = FocalNetModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , embed_dim=37 , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
return
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , nn.Linear ) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase : Dict = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : int = [*signature.parameters.keys()]
_UpperCAmelCase : List[str] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Union[str, Any] = outputs.hidden_states
_UpperCAmelCase : Union[str, Any] = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# FocalNet has a different seq_length
_UpperCAmelCase : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : int = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = reshaped_hidden_states[0].shape
_UpperCAmelCase : Optional[int] = (
reshaped_hidden_states[0].view(lowerCamelCase__ , lowerCamelCase__ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase : Union[str, Any] = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Any = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : int = 3
_UpperCAmelCase : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase : Any = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase : int = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : str = True
self.check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , (padded_height, padded_width) )
@slow
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Optional[Any] = FocalNetModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Tuple = _config_zero_init(lowerCamelCase__ )
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : int = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny" ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : Optional[int] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
_UpperCAmelCase : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Dict = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
        self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : str = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase : int = FocalNetConfig
lowerCAmelCase : Tuple = False
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = FocalNetModelTester(self )
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
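# Hypothetical usage sketch. "Blip2Processor" is an assumed upstream name for the
# class above (inferred from its BlipImageProcessor/AutoTokenizer attributes), and
# the checkpoint and image path are illustrative.
# from PIL import Image
#
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# image = Image.open("demo.jpg")
# inputs = processor(images=image, text="a photo of", return_tensors="pt")
# print(list(inputs.keys()))  # pixel_values plus the tokenizer fields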
'''simple docstring'''
def __lowerCAmelCase (number ):
    '''
    Return the 1-indexed position of the highest set bit of a non-negative
    ``number`` (equivalently, its bit length).

    >>> __lowerCAmelCase(0)
    0
    >>> __lowerCAmelCase(8)
    4
    '''
    if not isinstance(number , int ):
        raise TypeError("Input value must be an 'int' type" )
    position = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root , at , parent , out_edge_count ):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root , to , at , out_edge_count )
                low[at] = min(low[at] , low[to] )
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at] , to )
        return out_edge_count
    for i in range(n ):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i , i , -1 , out_edge_count )
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art ) ):
        if is_art[x] is True:
            print(x )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
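# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing any one of them disconnects the graph.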
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor
class UNet1DModel(ModelMixin, ConfigMixin):
@register_to_config
    def __init__( self , sample_size: int = 6_55_36 , sample_rate: Optional[int] = None , in_channels: int = 2 , out_channels: int = 2 , extra_in_channels: int = 0 , time_embedding_type: str = "fourier" , flip_sin_to_cos: bool = True , use_timestep_embedding: bool = False , freq_shift: float = 0.0 , down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , mid_block_type: str = "UNetMidBlock1D" , out_block_type: str = None , block_out_channels: Tuple[int] = (32, 32, 64) , act_fn: str = None , norm_num_groups: int = 8 , layers_per_block: int = 1 , downsample_each_block: bool = False , ) ->None:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Optional[int] = sample_size
# time
if time_embedding_type == "fourier":
_UpperCAmelCase : List[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=lowerCamelCase__ , log=lowerCamelCase__ , flip_sin_to_cos=lowerCamelCase__ )
_UpperCAmelCase : Tuple = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_UpperCAmelCase : Optional[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=lowerCamelCase__ , downscale_freq_shift=lowerCamelCase__ )
_UpperCAmelCase : List[str] = block_out_channels[0]
if use_timestep_embedding:
_UpperCAmelCase : List[Any] = block_out_channels[0] * 4
_UpperCAmelCase : List[str] = TimestepEmbedding(
in_channels=lowerCamelCase__ , time_embed_dim=lowerCamelCase__ , act_fn=lowerCamelCase__ , out_dim=block_out_channels[0] , )
_UpperCAmelCase : List[str] = nn.ModuleList([] )
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : Union[str, Any] = nn.ModuleList([] )
_UpperCAmelCase : Dict = None
# down
_UpperCAmelCase : List[str] = in_channels
for i, down_block_type in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : str = output_channel
_UpperCAmelCase : Any = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_UpperCAmelCase : Optional[int] = i == len(lowerCamelCase__ ) - 1
_UpperCAmelCase : List[str] = get_down_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(lowerCamelCase__ )
# mid
_UpperCAmelCase : List[Any] = get_mid_block(
lowerCamelCase__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=lowerCamelCase__ , add_downsample=lowerCamelCase__ , )
# up
_UpperCAmelCase : Optional[Any] = list(reversed(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = reversed_block_out_channels[0]
if out_block_type is None:
_UpperCAmelCase : List[str] = out_channels
else:
_UpperCAmelCase : List[str] = block_out_channels[0]
for i, up_block_type in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Any = output_channel
_UpperCAmelCase : int = (
reversed_block_out_channels[i + 1] if i < len(lowerCamelCase__ ) - 1 else final_upsample_channels
)
_UpperCAmelCase : Tuple = i == len(lowerCamelCase__ ) - 1
_UpperCAmelCase : Dict = get_up_block(
lowerCamelCase__ , num_layers=lowerCamelCase__ , in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(lowerCamelCase__ )
_UpperCAmelCase : Tuple = output_channel
# out
_UpperCAmelCase : Union[str, Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
_UpperCAmelCase : Optional[Any] = get_out_block(
out_block_type=lowerCamelCase__ , num_groups_out=lowerCamelCase__ , embed_dim=block_out_channels[0] , out_channels=lowerCamelCase__ , act_fn=lowerCamelCase__ , fc_dim=block_out_channels[-1] // 4 , )
    def forward(self , sample: torch.FloatTensor , timestep: Union[torch.Tensor, float, int] , return_dict: bool = True , ) ->Union[UNet1DOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : Any = timestep
if not torch.is_tensor(lowerCamelCase__ ):
_UpperCAmelCase : List[str] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(lowerCamelCase__ ) and len(timesteps.shape ) == 0:
_UpperCAmelCase : List[Any] = timesteps[None].to(sample.device )
_UpperCAmelCase : List[str] = self.time_proj(lowerCamelCase__ )
if self.config.use_timestep_embedding:
_UpperCAmelCase : Optional[int] = self.time_mlp(lowerCamelCase__ )
else:
_UpperCAmelCase : Optional[int] = timestep_embed[..., None]
_UpperCAmelCase : List[Any] = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_UpperCAmelCase : Optional[Any] = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_UpperCAmelCase : List[Any] = ()
for downsample_block in self.down_blocks:
_UpperCAmelCase : Tuple = downsample_block(hidden_states=lowerCamelCase__ , temb=lowerCamelCase__ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_UpperCAmelCase : List[Any] = self.mid_block(lowerCamelCase__ , lowerCamelCase__ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_UpperCAmelCase : Tuple = down_block_res_samples[-1:]
_UpperCAmelCase : Optional[int] = down_block_res_samples[:-1]
_UpperCAmelCase : Optional[Any] = upsample_block(lowerCamelCase__ , res_hidden_states_tuple=lowerCamelCase__ , temb=lowerCamelCase__ )
# 5. post-process
if self.out_block:
_UpperCAmelCase : Union[str, Any] = self.out_block(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
return (sample,)
        return UNet1DOutput(sample=sample )
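# Hypothetical smoke test for the model above; the sizes are illustrative and it
# assumes diffusers-style defaults (note some argument placeholders in the body
# above still need their original literal values for this to run end to end).
# if __name__ == "__main__":
#     unet = UNet1DModel(sample_size=256, in_channels=2, out_channels=2)
#     noisy_sample = torch.randn(1, 2, 256)   # (batch, channels, length)
#     output = unet(noisy_sample, timestep=torch.tensor([10])).sample
#     print(output.shape)                      # expected: torch.Size([1, 2, 256])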
'''simple docstring'''
def solution():
    '''Return the last ten digits of 1**1 + 2**2 + ... + 1000**1000 (Project Euler 48).'''
    total = 0
    for i in range(1 , 1_001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
def __init__( self : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=13 , lowerCamelCase__ : List[Any]=32 , lowerCamelCase__ : str=3 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : str=[10, 20, 30, 40] , lowerCamelCase__ : Optional[Any]=[2, 2, 3, 2] , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : List[Any]=37 , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : List[str]=10 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : int=["stage2", "stage3", "stage4"] , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=None , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = parent
_UpperCAmelCase : Dict = batch_size
_UpperCAmelCase : List[str] = image_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : Any = num_stages
_UpperCAmelCase : List[str] = hidden_sizes
_UpperCAmelCase : Optional[Any] = depths
_UpperCAmelCase : List[str] = is_training
_UpperCAmelCase : Optional[int] = use_labels
_UpperCAmelCase : List[Any] = intermediate_size
_UpperCAmelCase : Union[str, Any] = hidden_act
_UpperCAmelCase : Optional[int] = type_sequence_label_size
_UpperCAmelCase : Optional[int] = initializer_range
_UpperCAmelCase : str = out_features
_UpperCAmelCase : Optional[int] = num_labels
_UpperCAmelCase : Any = scope
_UpperCAmelCase : Optional[int] = num_stages
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
return UperNetConfig(
backbone_config=self.get_backbone_config() , hidden_size=5_12 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=lowerCamelCase__ , auxiliary_loss_weight=0.4 , auxiliary_in_channels=40 , auxiliary_channels=2_56 , auxiliary_num_convs=1 , auxiliary_concat_input=lowerCamelCase__ , loss_ignore_index=2_55 , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = UperNetForSemanticSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : str = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : str = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
lowerCAmelCase : Dict = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Any = False
lowerCAmelCase : Optional[int] = False
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = UperNetModelTester(self )
_UpperCAmelCase : Tuple = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
return
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(lowerCamelCase__ )
_UpperCAmelCase : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
_UpperCAmelCase : List[Any] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@unittest.skip(reason="UperNet does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not support input and output embeddings" )
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="UperNet does not have a base model" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] ):
_UpperCAmelCase : Dict = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : List[str] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Union[str, Any] = _config_zero_init(lowerCamelCase__ )
_UpperCAmelCase : str = _config_zero_init(configs_no_init.backbone_config )
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(config=lowerCamelCase__ )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip(reason="UperNet does not have tied weights" )
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : int ) ->int:
'''simple docstring'''
for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : str = UperNetForSemanticSegmentation.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def prepare_img():
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k" , repo_type="dataset" , filename="ADE_val_00000001.jpg" )
    image = Image.open(filepath ).convert("RGB" )
    return image
@require_torch
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny" )
_UpperCAmelCase : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny" ).to(lowerCamelCase__ )
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Any = processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Any = torch.tensor(
[[-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.5_9_5_8, -7.5_9_5_8, -7.4_3_0_2], [-7.4_7_9_7, -7.4_7_9_7, -7.3_0_6_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
_UpperCAmelCase : Dict = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" ).to(lowerCamelCase__ )
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : List[Any] = processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
with torch.no_grad():
_UpperCAmelCase : Dict = model(**lowerCamelCase__ )
_UpperCAmelCase : int = torch.Size((1, model.config.num_labels, 5_12, 5_12) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Dict = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a , input_b ):
    '''Return the Euclidean distance between two equal-length vectors.'''
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset , value_array ):
    '''For each vector in `value_array`, find the closest vector in `dataset`.'''
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a , input_b ):
    '''Return the cosine similarity between two vectors.'''
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
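    # Example usage of the helpers above (values are illustrative):
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    sample_values = np.array([[0.9, 1.1]])
    print(similarity_search(sample_dataset, sample_values))  # nearest vector and distance
    print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 1.0])))  # ~0.707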
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = torch.device('cpu')
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output(swiftformer_name ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys(state_dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" , ".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" , ".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." , ".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" , "swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = SwiftFormerConfig()
# prepare the ImageNet-1k label mapping (1000 classes)
_UpperCAmelCase : Dict = 1_000
_UpperCAmelCase : List[Any] = "huggingface/label-files"
_UpperCAmelCase : str = "imagenet-1k-id2label.json"
_UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" ) , "r" ) )
_UpperCAmelCase : Any = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
_UpperCAmelCase : Any = idalabel
_UpperCAmelCase : List[str] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
_UpperCAmelCase : List[Any] = [3, 3, 6, 4]
_UpperCAmelCase : int = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
_UpperCAmelCase : str = [3, 3, 9, 6]
_UpperCAmelCase : str = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
_UpperCAmelCase : Optional[Any] = [4, 3, 10, 5]
_UpperCAmelCase : List[str] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
_UpperCAmelCase : List[Any] = [4, 4, 12, 6]
_UpperCAmelCase : Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
_UpperCAmelCase : Tuple = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location="cpu" , check_hash=__lowerCAmelCase )
else:
_UpperCAmelCase : Union[str, Any] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Optional[Any] = checkpoint
_UpperCAmelCase : int = create_rename_keys(__lowerCAmelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# load HuggingFace model
_UpperCAmelCase : Any = SwiftFormerForImageClassification(__lowerCAmelCase ).eval()
hf_model.load_state_dict(__lowerCAmelCase )
# prepare test inputs
_UpperCAmelCase : Optional[int] = prepare_img()
_UpperCAmelCase : Dict = ViTImageProcessor.from_pretrained("preprocessor_config" )
_UpperCAmelCase : Optional[Any] = processor(images=__lowerCAmelCase , return_tensors="pt" )
# compare outputs from both models
_UpperCAmelCase : Any = get_expected_output(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , __lowerCAmelCase , atol=1e-3 )
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
lowerCamelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
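# Hedged sketch (added): the generic checkpoint-conversion pattern the script
# above follows -- derive (old_key, new_key) pairs, then pop/reinsert entries
# of the state dict. The helper name and rules below are illustrative.
import re

def rename_state_dict(state_dict: dict, rules: list) -> dict:
    """rules: list of (regex_pattern, replacement) applied to every key."""
    renamed = {}
    for key, value in state_dict.items():
        new_key = key
        for pattern, replacement in rules:
            new_key = re.sub(pattern, replacement, new_key)
        renamed[new_key] = value
    return renamed

# Mirroring two of the substitutions above:
# rename_state_dict(sd, [(r"\.pwconv", ".point_wise_conv"),
#                        (r"\.dwconv", ".depth_wise_conv")])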
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
'''simple docstring'''
from random import randint, random
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = False , __lowerCAmelCase = 5 , ):
_UpperCAmelCase : Union[str, Any] = [[-1] * number_of_cells] # Create a highway without any car
_UpperCAmelCase : int = 0
_UpperCAmelCase : Any = max(__lowerCAmelCase , 0 )
while i < number_of_cells:
_UpperCAmelCase : str = (
randint(0 , __lowerCAmelCase ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = 0
_UpperCAmelCase : Tuple = highway_now[car_index + 1 :]
for cell in range(len(__lowerCAmelCase ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
# If we reach the end of the highway, wrap around and keep counting from the start
return distance + get_distance(__lowerCAmelCase , -1 )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : str = len(__lowerCAmelCase )
# Before calculations, the highway is empty
_UpperCAmelCase : Dict = [-1] * number_of_cells
for car_index in range(__lowerCAmelCase ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_UpperCAmelCase : Dict = min(highway_now[car_index] + 1 , __lowerCAmelCase )
# Number of empty cell before the next car
_UpperCAmelCase : Union[str, Any] = get_distance(__lowerCAmelCase , __lowerCAmelCase ) - 1
# We can't have the car causing an accident
_UpperCAmelCase : int = min(next_highway[car_index] , __lowerCAmelCase )
if random() < probability:
# Randomly, a driver will slow down
_UpperCAmelCase : Any = max(next_highway[car_index] - 1 , 0 )
return next_highway
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = len(highway[0] )
for i in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = update(highway[i] , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = [-1] * number_of_cells
for car_index in range(__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_UpperCAmelCase : Tuple = (car_index + speed) % number_of_cells
# Commit the change of position
_UpperCAmelCase : Any = speed
highway.append(__lowerCAmelCase )
return highway
if __name__ == "__main__":
import doctest
doctest.testmod()
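# Hedged sketch (added): one Nagel-Schreckenberg update on a circular road,
# with readable names; the functions above implement the same three rules
# (accelerate, avoid collision, random slowdown) plus the movement step.
from random import random

def nasch_step(road: list, max_speed: int = 5, p_slow: float = 0.3) -> list:
    n = len(road)
    nxt = [-1] * n
    for i, v in enumerate(road):
        if v == -1:  # empty cell
            continue
        # empty cells in front of the car (wraps around the ring)
        gap = next((d for d in range(1, n) if road[(i + d) % n] != -1), n) - 1
        v = min(v + 1, max_speed, gap)  # accelerate but never collide
        if v > 0 and random() < p_slow:
            v -= 1  # random slowdown
        nxt[(i + v) % n] = v  # move the car
    return nxt

# Example: three cars on a ten-cell loop.
# nasch_step([2, -1, -1, 0, -1, -1, -1, 3, -1, -1])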
'''simple docstring'''
from math import factorial
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = real
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Any = [1] * rank
else:
_UpperCAmelCase : Dict = rank
def __repr__( self : str ) ->List[str]:
'''simple docstring'''
return (
F"""{self.real}+"""
F"""{'+'.join(str(lowerCamelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCamelCase__ )
def __add__( self : Dict , lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return Dual(self.real + other , self.duals )
_UpperCAmelCase : Optional[int] = self.duals.copy()
_UpperCAmelCase : Optional[int] = other.duals.copy()
if len(lowerCamelCase__ ) > len(lowerCamelCase__ ):
o_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
elif len(lowerCamelCase__ ) < len(lowerCamelCase__ ):
s_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
_UpperCAmelCase : Union[str, Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCamelCase__ )
lowerCAmelCase : Tuple = __add__
def __sub__( self : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
return self + other * -1
def __mul__( self : List[str] , lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = __mul__
def __truediv__( self : Optional[Any] , lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCamelCase__ )
raise ValueError
def __floordiv__( self : str , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCamelCase__ )
raise ValueError
def __pow__( self : Tuple , lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
if n < 0 or isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
_UpperCAmelCase : str = self
for _ in range(n - 1 ):
x *= self
return x
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not callable(__lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("differentiate() requires an int as input for order" )
_UpperCAmelCase : int = Dual(__lowerCAmelCase , 1 )
_UpperCAmelCase : Optional[int] = func(__lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def f(y):
    return y**2 * y**4
print(differentiate(f, 9, 2))
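# Hedged illustration (added): the forward-mode idea behind the Dual class,
# reduced to a first-order dual number with illustrative names. The product
# rule falls out of (a + b*e)(c + d*e) = ac + (ad + bc)e, since e**2 == 0.
class SimpleDual:
    def __init__(self, real: float, eps: float) -> None:
        self.real, self.eps = real, eps

    def __mul__(self, other: "SimpleDual") -> "SimpleDual":
        return SimpleDual(
            self.real * other.real,
            self.real * other.eps + self.eps * other.real,
        )

x = SimpleDual(9.0, 1.0)          # seed the derivative with 1
y = x * x * x * x * x * x         # y = x**6
assert y.eps == 6 * 9.0 ** 5      # d/dx x**6 = 6 * x**5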
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
lowerCamelCase__ = get_logger(__name__)
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Optional[str] = None ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = (
os.path.join(lowerCamelCase__ , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
)
_UpperCAmelCase : Tuple = Extractor
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
from .file_utils import hash_url_to_filename
# Path where we extract compressed archives
# We extract in the cache dir, and get the extracted path name by hashing the original path
_UpperCAmelCase : Dict = os.path.abspath(lowerCamelCase__ )
return os.path.join(self.extract_dir , hash_url_to_filename(lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : str , lowerCamelCase__ : bool ) ->bool:
'''simple docstring'''
return force_extract or (
not os.path.isfile(lowerCamelCase__ ) and not (os.path.isdir(lowerCamelCase__ ) and os.listdir(lowerCamelCase__ ))
)
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : bool = False ) ->str:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.extractor.infer_extractor_format(lowerCamelCase__ )
if not extractor_format:
return input_path
_UpperCAmelCase : List[Any] = self._get_output_path(lowerCamelCase__ )
if self._do_extract(lowerCamelCase__ , lowerCamelCase__ ):
self.extractor.extract(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
return output_path
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@classmethod
@abstractmethod
def lowerCAmelCase__ ( cls : List[Any] , lowerCamelCase__ : Union[Path, str] , **lowerCamelCase__ : Optional[Any] ) ->bool:
'''simple docstring'''
...
@staticmethod
@abstractmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
...
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : List[bytes] = []
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
with open(lowerCamelCase__ , "rb" ) as f:
return f.read(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bytes = b"" ) ->bool:
'''simple docstring'''
if not magic_number:
_UpperCAmelCase : Dict = max(len(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
try:
_UpperCAmelCase : Dict = cls.read_magic_number(lowerCamelCase__ , lowerCamelCase__ )
except OSError:
return False
return any(magic_number.startswith(lowerCamelCase__ ) for cls_magic_number in cls.magic_numbers )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : Union[Path, str] , **lowerCamelCase__ : Any ) ->bool:
'''simple docstring'''
return tarfile.is_tarfile(lowerCamelCase__ )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] ) ->Optional[int]:
'''simple docstring'''
def resolved(lowerCamelCase__ : str ) -> str:
return os.path.realpath(os.path.abspath(lowerCamelCase__ ) )
def badpath(lowerCamelCase__ : str , lowerCamelCase__ : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ).startswith(lowerCamelCase__ )
def badlink(lowerCamelCase__ : Any , lowerCamelCase__ : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[Any] = resolved(os.path.join(lowerCamelCase__ , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=lowerCamelCase__ )
_UpperCAmelCase : int = resolved(lowerCamelCase__ )
for finfo in members:
if badpath(finfo.name , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(lowerCamelCase__ , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(lowerCamelCase__ , lowerCamelCase__ ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : Any = tarfile.open(lowerCamelCase__ )
tar_file.extractall(lowerCamelCase__ , members=TarExtractor.safemembers(lowerCamelCase__ , lowerCamelCase__ ) )
tar_file.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = [b"\x1F\x8B"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
with gzip.open(lowerCamelCase__ , "rb" ) as gzip_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def lowerCAmelCase__ ( cls : str , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bytes = b"" ) ->bool:
'''simple docstring'''
if super().is_extractable(lowerCamelCase__ , magic_number=lowerCamelCase__ ):
return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowerCamelCase__ , "rb" ) as fp:
_UpperCAmelCase : List[Any] = _EndRecData(lowerCamelCase__ )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : Tuple = fp.read(lowerCamelCase__ ) # CD is where we expect it to be
if len(lowerCamelCase__ ) == sizeCentralDir:
_UpperCAmelCase : List[str] = struct.unpack(lowerCamelCase__ , lowerCamelCase__ ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with zipfile.ZipFile(lowerCamelCase__ , "r" ) as zip_file:
zip_file.extractall(lowerCamelCase__ )
zip_file.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
with lzma.open(lowerCamelCase__ ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : str = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"] # RAR_ID # RAR5_ID
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
_UpperCAmelCase : int = rarfile.RarFile(lowerCamelCase__ )
rf.extractall(lowerCamelCase__ )
rf.close()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_UpperCAmelCase : Tuple = zstd.ZstdDecompressor()
with open(lowerCamelCase__ , "rb" ) as ifh, open(lowerCamelCase__ , "wb" ) as ofh:
dctx.copy_stream(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : str = [b"\x42\x5A\x68"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
with bza.open(lowerCamelCase__ , "rb" ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
with pyazr.SevenZipFile(lowerCamelCase__ , "r" ) as archive:
archive.extractall(lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = [b"\x04\x22\x4D\x18"]
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] ) ->None:
'''simple docstring'''
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(lowerCamelCase__ , "rb" ) as compressed_file:
with open(lowerCamelCase__ , "wb" ) as extracted_file:
shutil.copyfileobj(lowerCamelCase__ , lowerCamelCase__ )
class lowerCAmelCase__ :
# Keep the zip extractor after tar and gzip, since those formats can be wrongly detected as zip
lowerCAmelCase : Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def lowerCAmelCase__ ( cls : str ) ->Dict:
'''simple docstring'''
return max(
len(lowerCamelCase__ )
for extractor in cls.extractors.values()
if issubclass(lowerCamelCase__ , lowerCamelCase__ )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
try:
return MagicNumberBaseExtractor.read_magic_number(lowerCamelCase__ , magic_number_length=lowerCamelCase__ )
except OSError:
return b""
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : bool = False ) ->bool:
'''simple docstring'''
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=lowerCamelCase__ , )
_UpperCAmelCase : Tuple = cls.infer_extractor_format(lowerCamelCase__ )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def lowerCAmelCase__ ( cls : Dict , lowerCamelCase__ : Union[Path, str] ) ->str: # <Added version="2.4.0"/>
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = cls._get_magic_number_max_length()
_UpperCAmelCase : Optional[Any] = cls._read_magic_number(lowerCamelCase__ , lowerCamelCase__ )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(lowerCamelCase__ , magic_number=lowerCamelCase__ ):
return extractor_format
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Union[Path, str] , lowerCamelCase__ : Optional[str] = None , lowerCamelCase__ : Optional[BaseExtractor] = "deprecated" , ) ->None:
'''simple docstring'''
os.makedirs(os.path.dirname(lowerCamelCase__ ) , exist_ok=lowerCamelCase__ )
# Prevent parallel extractions
_UpperCAmelCase : List[str] = str(Path(lowerCamelCase__ ).with_suffix(".lock" ) )
with FileLock(lowerCamelCase__ ):
shutil.rmtree(lowerCamelCase__ , ignore_errors=lowerCamelCase__ )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=lowerCamelCase__ , )
_UpperCAmelCase : Optional[Any] = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : List[str] = cls.extractors[extractor_format]
return extractor.extract(lowerCamelCase__ , lowerCamelCase__ )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=lowerCamelCase__ , )
for extractor in cls.extractors.values():
if extractor.is_extractable(lowerCamelCase__ ):
return extractor.extract(lowerCamelCase__ , lowerCamelCase__ )
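# Hedged sketch (added): the magic-number sniffing idea the extractors above
# rely on, isolated. The byte signatures mirror the ones registered in this
# module; the helper name is illustrative.
from typing import Optional

MAGIC_NUMBERS = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\x42\x5a\x68": "bz2"}

def sniff_format(path: str) -> Optional[str]:
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in MAGIC_NUMBERS))
    for magic, fmt in MAGIC_NUMBERS.items():
        if head.startswith(magic):
            return fmt
    return None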
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase )
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = tempfile.mkdtemp()
_UpperCAmelCase : Tuple = BlipImageProcessor()
_UpperCAmelCase : Union[str, Any] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel" )
_UpperCAmelCase : Optional[int] = BlipProcessor(lowerCamelCase__ , lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : Optional[Any] , **lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).tokenizer
def lowerCAmelCase__ ( self : List[Any] , **lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).image_processor
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : str = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
_UpperCAmelCase : List[Any] = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : List[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
_UpperCAmelCase : int = BlipProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : int = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : Any = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : str = self.prepare_image_inputs()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="np" )
_UpperCAmelCase : Dict = processor(images=lowerCamelCase__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : str = self.get_tokenizer()
_UpperCAmelCase : List[Any] = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : str = processor(text=lowerCamelCase__ )
_UpperCAmelCase : int = tokenizer(lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : Any = self.get_tokenizer()
_UpperCAmelCase : Tuple = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Tuple = "lower newer"
_UpperCAmelCase : Any = self.prepare_image_inputs()
_UpperCAmelCase : Dict = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : List[Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[int] = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : List[Any] = processor.batch_decode(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : Union[str, Any] = BlipProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = "lower newer"
_UpperCAmelCase : Dict = self.prepare_image_inputs()
_UpperCAmelCase : str = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "input_ids", "attention_mask"] )
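# Hedged sketch (added): the composition pattern these tests exercise -- a
# processor that routes text to a tokenizer, images to an image processor,
# and merges both outputs. The class below is illustrative, not the real
# BlipProcessor implementation.
class TinyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        encoding = {}
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        return encoding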
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = 'PoolFormerConfig'
# Base docstring
lowerCamelCase__ = 'sail/poolformer_s12'
lowerCamelCase__ = [1, 512, 7, 7]
# Image classification docstring
lowerCamelCase__ = 'sail/poolformer_s12'
lowerCamelCase__ = 'tabby, tabby cat'
lowerCamelCase__ = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 0.0 , __lowerCAmelCase = False ):
if drop_prob == 0.0 or not training:
return input
_UpperCAmelCase : Optional[Any] = 1 - drop_prob
_UpperCAmelCase : Optional[int] = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
_UpperCAmelCase : Any = keep_prob + torch.rand(__lowerCAmelCase , dtype=input.dtype , device=input.device )
random_tensor.floor_() # binarize
_UpperCAmelCase : List[Any] = input.div(__lowerCAmelCase ) * random_tensor
return output
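# Note (added): `drop_path` implements per-sample stochastic depth
# (Huang et al., 2016): during training each example's residual branch is
# zeroed with probability `drop_prob`, and surviving branches are rescaled
# by 1 / keep_prob (the `input.div(keep_prob)`) so the expected output is
# unchanged and no rescaling is needed at inference time.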
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Optional[int] , lowerCamelCase__ : Optional[float] = None ) ->None:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Union[str, Any] = drop_prob
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : torch.Tensor ) ->torch.Tensor:
'''simple docstring'''
return drop_path(lowerCamelCase__ , self.drop_prob , self.training )
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
return "p={}".format(self.drop_prob )
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Any , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict=None ) ->Optional[int]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Dict = patch_size if isinstance(lowerCamelCase__ , collections.abc.Iterable ) else (patch_size, patch_size)
_UpperCAmelCase : Optional[int] = stride if isinstance(lowerCamelCase__ , collections.abc.Iterable ) else (stride, stride)
_UpperCAmelCase : Any = padding if isinstance(lowerCamelCase__ , collections.abc.Iterable ) else (padding, padding)
_UpperCAmelCase : List[Any] = nn.Convad(lowerCamelCase__ , lowerCamelCase__ , kernel_size=lowerCamelCase__ , stride=lowerCamelCase__ , padding=lowerCamelCase__ )
_UpperCAmelCase : str = norm_layer(lowerCamelCase__ ) if norm_layer else nn.Identity()
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.projection(lowerCamelCase__ )
_UpperCAmelCase : int = self.norm(lowerCamelCase__ )
return embeddings
class lowerCAmelCase__ ( nn.GroupNorm ):
def __init__( self : Any , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
super().__init__(1 , lowerCamelCase__ , **lowerCamelCase__ )
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Any , lowerCamelCase__ : Optional[int] ) ->List[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : int = nn.AvgPoolad(lowerCamelCase__ , stride=1 , padding=pool_size // 2 , count_include_pad=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Union[str, Any] ) ->int:
'''simple docstring'''
return self.pool(lowerCamelCase__ ) - hidden_states
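# Note (added): PoolFormer's token mixer is plain average pooling. The
# `- hidden_states` above removes the identity component, so that when
# PoolFormerLayer adds the residual connection the block effectively
# computes x + (pool(x) - x), ignoring the group norm and layer scale,
# instead of double-counting the input.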
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Optional[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Optional[Any] = nn.Convad(lowerCamelCase__ , lowerCamelCase__ , 1 )
_UpperCAmelCase : Optional[Any] = nn.Convad(lowerCamelCase__ , lowerCamelCase__ , 1 )
_UpperCAmelCase : Union[str, Any] = PoolFormerDropPath(lowerCamelCase__ )
if isinstance(config.hidden_act , lowerCamelCase__ ):
_UpperCAmelCase : str = ACTaFN[config.hidden_act]
else:
_UpperCAmelCase : int = config.hidden_act
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.conva(lowerCamelCase__ )
_UpperCAmelCase : str = self.act_fn(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = self.drop(lowerCamelCase__ )
_UpperCAmelCase : int = self.conva(lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.drop(lowerCamelCase__ )
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : int = PoolFormerPooling(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = PoolFormerOutput(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = PoolFormerGroupNorm(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = PoolFormerGroupNorm(lowerCamelCase__ )
# Useful for training neural nets
_UpperCAmelCase : int = PoolFormerDropPath(lowerCamelCase__ ) if drop_path > 0.0 else nn.Identity()
_UpperCAmelCase : Union[str, Any] = config.use_layer_scale
if config.use_layer_scale:
_UpperCAmelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCamelCase__) ) , requires_grad=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((lowerCamelCase__) ) , requires_grad=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
if self.use_layer_scale:
_UpperCAmelCase : Tuple = self.pooling(self.before_norm(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
_UpperCAmelCase : Optional[int] = hidden_states + self.drop_path(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = ()
_UpperCAmelCase : Optional[int] = self.output(self.after_norm(lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
_UpperCAmelCase : str = hidden_states + self.drop_path(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = (output,) + outputs
return outputs
else:
_UpperCAmelCase : Dict = self.drop_path(self.pooling(self.before_norm(lowerCamelCase__ ) ) )
# First residual connection
_UpperCAmelCase : Tuple = pooling_output + hidden_states
_UpperCAmelCase : str = ()
# Second residual connection inside the PoolFormerOutput block
_UpperCAmelCase : Optional[int] = self.drop_path(self.output(self.after_norm(lowerCamelCase__ ) ) )
_UpperCAmelCase : int = hidden_states + layer_output
_UpperCAmelCase : Optional[int] = (output,) + outputs
return outputs
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : List[str] , lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Dict = config
# stochastic depth decay rule
_UpperCAmelCase : Optional[int] = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
# patch embeddings
_UpperCAmelCase : Any = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
_UpperCAmelCase : Tuple = nn.ModuleList(lowerCamelCase__ )
# Transformer blocks
_UpperCAmelCase : Any = []
_UpperCAmelCase : Tuple = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
_UpperCAmelCase : List[Any] = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
lowerCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
blocks.append(nn.ModuleList(lowerCamelCase__ ) )
_UpperCAmelCase : List[str] = nn.ModuleList(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : str , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : int=True ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = () if output_hidden_states else None
_UpperCAmelCase : List[Any] = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
_UpperCAmelCase : Dict = layers
# Get patch embeddings from hidden_states
_UpperCAmelCase : List[str] = embedding_layer(lowerCamelCase__ )
# Send the embeddings through the blocks
for _, blk in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = blk(lowerCamelCase__ )
_UpperCAmelCase : str = layer_outputs[0]
if output_hidden_states:
_UpperCAmelCase : str = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowerCamelCase__ , hidden_states=lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[Any] = PoolFormerConfig
lowerCAmelCase : Optional[int] = "poolformer"
lowerCAmelCase : Dict = "pixel_values"
lowerCAmelCase : Dict = True
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ) ->List[str]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(lowerCamelCase__ , nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=False ) ->Any:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = value
lowerCamelCase__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
lowerCamelCase__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top." , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Dict , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Dict = config
_UpperCAmelCase : int = PoolFormerEncoder(lowerCamelCase__ )
# Initialize weights and apply final processing
self.post_init()
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , ) ->Union[Tuple, BaseModelOutputWithNoAttention]:
'''simple docstring'''
_UpperCAmelCase : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
_UpperCAmelCase : List[str] = self.encoder(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , )
_UpperCAmelCase : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=lowerCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class lowerCAmelCase__ ( nn.Module ):
def __init__( self : Dict , lowerCamelCase__ : str ) ->Tuple:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : List[Any] = nn.Linear(config.hidden_size , config.hidden_size )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.dense(lowerCamelCase__ )
return output
@add_start_docstrings(
"\n PoolFormer Model transformer with an image classification head on top\n " , UpperCAmelCase__ , )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : str , lowerCamelCase__ : Optional[Any] ) ->Tuple:
'''simple docstring'''
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = config.num_labels
_UpperCAmelCase : str = PoolFormerModel(lowerCamelCase__ )
# Final norm
_UpperCAmelCase : List[str] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
_UpperCAmelCase : Union[str, Any] = (
nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowerCamelCase__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowerCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[torch.FloatTensor] = None , lowerCamelCase__ : Optional[torch.LongTensor] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[bool] = None , ) ->Union[Tuple, ImageClassifierOutputWithNoAttention]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Any = self.poolformer(
lowerCamelCase__ , output_hidden_states=lowerCamelCase__ , return_dict=lowerCamelCase__ , )
_UpperCAmelCase : List[Any] = outputs[0]
_UpperCAmelCase : Optional[int] = self.classifier(self.norm(lowerCamelCase__ ).mean([-2, -1] ) )
_UpperCAmelCase : int = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
_UpperCAmelCase : Optional[int] = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
_UpperCAmelCase : List[Any] = "single_label_classification"
else:
_UpperCAmelCase : Any = "multi_label_classification"
if self.config.problem_type == "regression":
_UpperCAmelCase : Union[str, Any] = MSELoss()
if self.num_labels == 1:
_UpperCAmelCase : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
_UpperCAmelCase : Optional[int] = loss_fct(lowerCamelCase__ , lowerCamelCase__ )
elif self.config.problem_type == "single_label_classification":
_UpperCAmelCase : Optional[int] = CrossEntropyLoss()
_UpperCAmelCase : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
_UpperCAmelCase : int = BCEWithLogitsLoss()
_UpperCAmelCase : Optional[Any] = loss_fct(lowerCamelCase__ , lowerCamelCase__ )
if not return_dict:
_UpperCAmelCase : Tuple = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowerCamelCase__ , logits=lowerCamelCase__ , hidden_states=outputs.hidden_states )
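# A minimal usage sketch for the classification model defined above; the
# checkpoint name and the image path are assumptions for illustration, not
# taken from this file.
from PIL import Image
import torch
from transformers import AutoImageProcessor, PoolFormerForImageClassification

processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")  # assumed checkpoint
model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
image = Image.open("cat.png")  # hypothetical local image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (batch_size, num_labels)
print(model.config.id2label[logits.argmax(-1).item()])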
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if discount_rate < 0:
raise ValueError("Discount rate cannot be negative" )
if not cash_flows:
raise ValueError("Cash flows list cannot be empty" )
_UpperCAmelCase : Optional[int] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(__lowerCAmelCase ) )
return round(__lowerCAmelCase , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
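# A readable, runnable equivalent of the NPV helper above (the row keeps this
# dump's obfuscated parameter names, which shadow discount_rate/cash_flows):
def net_present_value(discount_rate, cash_flows):
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    npv = sum(cf / (1 + discount_rate) ** i for i, cf in enumerate(cash_flows))
    return round(npv, ndigits=2)

# A 1000 outflow at t=0 plus three 500 inflows, discounted at 10%:
print(net_present_value(0.10, [-1000, 500, 500, 500]))  # 243.43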
| 719
|
'''simple docstring'''
import os
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , "num.txt" )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
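# The same idea with inline data instead of num.txt: sum large integers and
# keep the first ten digits of the total (the two values are sample inputs).
nums = [
    37107287533902102798797998220837590246510135740250,
    46376937677490009712648124896970078050417018260538,
]
print(str(sum(nums))[:10])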
| 40
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(lowerCamelCase__ , "width_multiplier" ) )
class lowerCAmelCase__ :
def __init__( self : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : Any=13 , lowerCamelCase__ : int=64 , lowerCamelCase__ : Any=2 , lowerCamelCase__ : List[Any]=3 , lowerCamelCase__ : Optional[Any]="swish" , lowerCamelCase__ : Union[str, Any]=3 , lowerCamelCase__ : Any=32 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Union[str, Any]=0.0_2 , lowerCamelCase__ : Union[str, Any]=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Dict=0.2_5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : int=0.0 , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : List[str] = batch_size
_UpperCAmelCase : Union[str, Any] = image_size
_UpperCAmelCase : Optional[int] = patch_size
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : List[str] = make_divisible(5_12 * width_multiplier , divisor=8 )
_UpperCAmelCase : Any = hidden_act
_UpperCAmelCase : Optional[int] = conv_kernel_size
_UpperCAmelCase : List[Any] = output_stride
_UpperCAmelCase : List[Any] = classifier_dropout_prob
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Any = is_training
_UpperCAmelCase : Optional[Any] = num_labels
_UpperCAmelCase : Optional[Any] = initializer_range
_UpperCAmelCase : Optional[int] = scope
_UpperCAmelCase : Dict = width_multiplier
_UpperCAmelCase : str = ffn_dropout
_UpperCAmelCase : Tuple = attn_dropout
def lowerCAmelCase__ ( self : List[str] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : int = None
_UpperCAmelCase : Any = None
if self.use_labels:
_UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase : Tuple = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = MobileViTVaModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Optional[int] = MobileViTVaForSemanticSegmentation(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
_UpperCAmelCase : List[Any] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = self.prepare_config_and_inputs()
_UpperCAmelCase : List[str] = config_and_inputs
_UpperCAmelCase : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase : Optional[Any] = (
{
"feature-extraction": MobileViTVaModel,
"image-classification": MobileViTVaForImageClassification,
"image-segmentation": MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Dict = False
lowerCAmelCase : str = False
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MobileViTVaModelTester(self )
_UpperCAmelCase : List[Any] = MobileViTVaConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : List[str] = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : int ):
_UpperCAmelCase : str = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Tuple = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Optional[Any] = outputs.hidden_states
_UpperCAmelCase : Tuple = 5
self.assertEqual(len(lowerCamelCase__ ) , lowerCamelCase__ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
_UpperCAmelCase : Optional[Any] = 2
for i in range(len(lowerCamelCase__ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : int = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : int = MobileViTVaModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Any = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
lowerCamelCase__ )
_UpperCAmelCase : str = self.default_image_processor
_UpperCAmelCase : str = prepare_img()
_UpperCAmelCase : Union[str, Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : List[Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Union[str, Any] = model.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : List[Any] = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = outputs.logits
# verify the logits
_UpperCAmelCase : Optional[Any] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , lowerCamelCase__ )
_UpperCAmelCase : int = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=lowerCamelCase__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Any = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : int = model.to(lowerCamelCase__ )
_UpperCAmelCase : Any = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
_UpperCAmelCase : List[str] = prepare_img()
_UpperCAmelCase : Tuple = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(**lowerCamelCase__ )
_UpperCAmelCase : List[Any] = outputs.logits.detach().cpu()
_UpperCAmelCase : Optional[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ , target_sizes=[(50, 60)] )
_UpperCAmelCase : Tuple = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = image_processor.post_process_semantic_segmentation(outputs=lowerCamelCase__ )
_UpperCAmelCase : Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , lowerCamelCase__ )
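# A standalone version of the slow classification test above. This dump
# spells the classes MobileViTVa...; the real transformers names use V2. The
# checkpoint and fixture path come from the tests themselves.
import torch
from PIL import Image
from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # (1, 1000) ImageNet logits
print(model.config.id2label[logits.argmax(-1).item()])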
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
    # Use the dataset loader to feed batches to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
    # note: args.batch_size is actually num_return_sequences; the DataLoader batch size stays 1
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
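# A minimal sketch of the code_eval metric this script relies on: one
# reference test, two candidate completions, scored for pass@1 and pass@2.
import os
os.environ["HF_ALLOW_CODE_EVAL"] = "1"  # required, as the warning above explains
from datasets import load_metric

code_eval = load_metric("code_eval")
candidates = [["def add(a, b):\n    return a + b", "def add(a, b):\n    return a - b"]]
references = ["assert add(2, 3) == 5"]
pass_at_k, results = code_eval.compute(references=references, predictions=candidates, k=[1, 2])
print(pass_at_k)  # {'pass@1': 0.5, 'pass@2': 1.0}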
| 40
| 0
|
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
lowerCamelCase__ = logging.get_logger(__name__)
enable_full_determinism()
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = UNetaDModel
lowerCAmelCase : Optional[int] = "sample"
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = 4
_UpperCAmelCase : Any = 3
_UpperCAmelCase : Tuple = (32, 32)
_UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor([10] ).to(lowerCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ ( self : Dict ) ->Dict:
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : str = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
_UpperCAmelCase : List[str] = self.dummy_input
return init_dict, inputs_dict
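# A sketch instantiating a tiny UNet from the dummy config above and running
# one forward pass (UNetaDModel is this dump's spelling of diffusers'
# UNet2DModel; the sketch uses the real name).
import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    block_out_channels=(32, 64),
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    attention_head_dim=3,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    sample_size=32,
)
noise = torch.randn(4, 3, 32, 32)
with torch.no_grad():
    out = model(noise, timestep=torch.tensor([10])).sample
print(out.shape)  # torch.Size([4, 3, 32, 32]), same as the input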
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = UNetaDModel
lowerCAmelCase : Any = "sample"
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = 4
_UpperCAmelCase : Optional[int] = 4
_UpperCAmelCase : Any = (32, 32)
_UpperCAmelCase : Dict = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = torch.tensor([10] ).to(lowerCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
return (4, 32, 32)
@property
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
return (4, 32, 32)
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
_UpperCAmelCase : List[Any] = self.dummy_input
return init_dict, inputs_dict
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase__ )
model.to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU" )
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase__ )
model_accelerate.to(lowerCamelCase__ )
model_accelerate.eval()
_UpperCAmelCase : List[Any] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase : Tuple = noise.to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase__ )
_UpperCAmelCase : Any = model_accelerate(lowerCamelCase__ , lowerCamelCase__ )["sample"]
        # the two models don't need to stay on the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase : int = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=lowerCamelCase__ , low_cpu_mem_usage=lowerCamelCase__ )
model_normal_load.to(lowerCamelCase__ )
model_normal_load.eval()
_UpperCAmelCase : List[str] = model_normal_load(lowerCamelCase__ , lowerCamelCase__ )["sample"]
assert torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-3 )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" )
model.eval()
model.to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase : List[Any] = noise.to(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.tensor([10] * noise.shape[0] ).to(lowerCamelCase__ )
with torch.no_grad():
_UpperCAmelCase : Any = model(lowerCamelCase__ , lowerCamelCase__ ).sample
_UpperCAmelCase : List[str] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase : Tuple = torch.tensor([-13.32_58, -20.11_00, -15.98_73, -17.66_17, -23.05_96, -17.94_19, -13.36_75, -16.18_89, -12.38_00] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-3 ) )
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Union[str, Any] = UNetaDModel
lowerCAmelCase : Optional[int] = "sample"
@property
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict=(32, 32) ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = 4
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=lowerCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
return (3, 32, 32)
@property
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
return (3, 32, 32)
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : int = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0 ),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
_UpperCAmelCase : Optional[int] = self.dummy_input
return init_dict, inputs_dict
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Any = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
self.assertEqual(len(loading_info["missing_keys"] ) , 0 )
model.to(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.dummy_input
_UpperCAmelCase : Union[str, Any] = floats_tensor((4, 3) + (2_56, 2_56) ).to(lowerCamelCase__ )
_UpperCAmelCase : int = noise
_UpperCAmelCase : Dict = model(**lowerCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def lowerCAmelCase__ ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" )
model.to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = 4
_UpperCAmelCase : int = 3
_UpperCAmelCase : Optional[int] = (2_56, 2_56)
_UpperCAmelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
_UpperCAmelCase : int = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase__ )
with torch.no_grad():
_UpperCAmelCase : Tuple = model(lowerCamelCase__ , lowerCamelCase__ ).sample
_UpperCAmelCase : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase : Union[str, Any] = torch.tensor([-48_42.86_91, -64_99.66_31, -38_00.19_53, -79_78.26_86, -1_09_80.71_29, -2_00_28.85_35, 81_48.28_22, 23_42.29_05, 5_67.76_08] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-2 ) )
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update" )
model.to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = 4
_UpperCAmelCase : List[Any] = 3
_UpperCAmelCase : Any = (32, 32)
_UpperCAmelCase : Optional[int] = torch.ones((batch_size, num_channels) + sizes ).to(lowerCamelCase__ )
_UpperCAmelCase : int = torch.tensor(batch_size * [1E-4] ).to(lowerCamelCase__ )
with torch.no_grad():
_UpperCAmelCase : Any = model(lowerCamelCase__ , lowerCamelCase__ ).sample
_UpperCAmelCase : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase : Dict = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(lowerCamelCase__ , lowerCamelCase__ , rtol=1E-2 ) )
def lowerCAmelCase__ ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
pass
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {'configuration_vit': ['VIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTConfig', 'ViTOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['ViTFeatureExtractor']
lowerCamelCase__ = ['ViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'VIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTForImageClassification',
'ViTForMaskedImageModeling',
'ViTModel',
'ViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TFViTForImageClassification',
'TFViTModel',
'TFViTPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'FlaxViTForImageClassification',
'FlaxViTModel',
'FlaxViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
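# The import rows in this dump all follow transformers' lazy-module pattern;
# note the dump renames the structure dict to lowerCamelCase__ while still
# passing _import_structure to _LazyModule. The intended __init__.py tail
# looks like this (a sketch; the real files list every submodule):
import sys
from transformers.utils import _LazyModule

_import_structure = {"configuration_vit": ["ViTConfig", "ViTOnnxConfig"]}

# Submodules are not imported here; attribute access on the module object
# triggers the actual import lazily.
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)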
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
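# A readable equivalent of the helper above: build a dataset-repo URL,
# URL-quoting the path only on old huggingface_hub versions (the function
# name is an assumption; the row obfuscates it).
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version

def hf_dataset_url(repo_id: str, filename: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)

print(hf_dataset_url("squad", "plain_text/train-00000-of-00001.parquet"))  # hypothetical file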
| 40
| 0
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = ShapEImgaImgPipeline
lowerCAmelCase : int = ["image"]
lowerCAmelCase : Optional[int] = ["image"]
lowerCAmelCase : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
lowerCAmelCase : Optional[Any] = False
@property
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
return 8
@property
def lowerCAmelCase__ ( self : Tuple ) ->Any:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
_UpperCAmelCase : List[str] = CLIPVisionModel(lowerCamelCase__ )
return model
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=lowerCamelCase__ , do_normalize=lowerCamelCase__ , do_resize=lowerCamelCase__ , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=2_24 , )
return image_processor
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"embedding_proj_norm_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_UpperCAmelCase : Tuple = PriorTransformer(**lowerCamelCase__ )
return model
@property
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_UpperCAmelCase : Dict = ShapERenderer(**lowerCamelCase__ )
return model
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = self.dummy_prior
_UpperCAmelCase : str = self.dummy_image_encoder
_UpperCAmelCase : Any = self.dummy_image_processor
_UpperCAmelCase : Union[str, Any] = self.dummy_renderer
_UpperCAmelCase : str = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , )
_UpperCAmelCase : List[Any] = {
"prior": prior,
"image_encoder": image_encoder,
"image_processor": image_processor,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Optional[int]=0 ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Dict = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : str = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"image": input_image,
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = "cpu"
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : List[str] = self.pipeline_class(**lowerCamelCase__ )
_UpperCAmelCase : Dict = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Tuple = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
_UpperCAmelCase : Optional[int] = output.images[0]
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCAmelCase : Dict = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = torch_device == "cpu"
_UpperCAmelCase : Dict = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , )
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.get_dummy_components()
_UpperCAmelCase : List[str] = self.pipeline_class(**lowerCamelCase__ )
_UpperCAmelCase : Tuple = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Tuple = 1
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : List[str] = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
_UpperCAmelCase : str = batch_size * [inputs[key]]
_UpperCAmelCase : List[str] = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
_UpperCAmelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_img2img_out.npy" )
_UpperCAmelCase : str = ShapEImgaImgPipeline.from_pretrained("openai/shap-e-img2img" )
_UpperCAmelCase : str = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
_UpperCAmelCase : Tuple = pipe(
lowerCamelCase__ , generator=lowerCamelCase__ , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
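# The slow test above, condensed to plain usage (ShapEImgaImgPipeline is this
# dump's spelling of diffusers' ShapEImg2ImgPipeline); the checkpoint and
# image URL come from the test itself.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator().manual_seed(0)
frames = pipe(
    image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np"
).images[0]
print(frames.shape)  # (20, 64, 64, 3), per the test's assertion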
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
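# A sketch driving the wrapper above: configure a timm backbone (the
# "resnet18" name is an assumption; any entry in timm.list_models() works,
# and timm must be installed), run a forward pass, and inspect one feature
# map per requested stage.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
model = TimmBackbone(config)
with torch.no_grad():
    outputs = model(torch.randn(1, 3, 224, 224))
for fmap in outputs.feature_maps:
    print(fmap.shape)  # strides 4/8/16/32 for a resnet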
| 40
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/biogpt': 'https://huggingface.co/microsoft/biogpt/resolve/main/config.json',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Union[str, Any] = "biogpt"
def __init__( self : Dict , lowerCamelCase__ : Union[str, Any]=4_23_84 , lowerCamelCase__ : Any=10_24 , lowerCamelCase__ : List[str]=24 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Optional[Any]=40_96 , lowerCamelCase__ : Optional[Any]="gelu" , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Union[str, Any]=0.1 , lowerCamelCase__ : List[str]=10_24 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Any=1E-12 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]=True , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : str=1 , lowerCamelCase__ : Any=0 , lowerCamelCase__ : List[str]=2 , **lowerCamelCase__ : List[Any] , ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = vocab_size
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : List[str] = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : int = intermediate_size
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Optional[int] = layer_norm_eps
_UpperCAmelCase : List[Any] = scale_embedding
_UpperCAmelCase : List[str] = use_cache
_UpperCAmelCase : Optional[int] = layerdrop
_UpperCAmelCase : Any = activation_dropout
super().__init__(pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
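# Instantiation sketch for the config above: the defaults reproduce the
# microsoft/biogpt architecture, and any field can be shrunk for testing.
from transformers import BioGptConfig, BioGptModel

config = BioGptConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=4, intermediate_size=128)
model = BioGptModel(config)  # randomly initialized tiny variant
print(model.config.vocab_size)  # 42384, the BioGPT default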
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
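# --- Added sketch of the availability-fallback pattern above (illustrative) ---
# When torch/transformers are missing, diffusers substitutes dummy objects that
# raise a helpful error on first use. A minimal stand-in for that behaviour;
# `_RequiresBackends` is a hypothetical helper, not the real dummy class.
class _RequiresBackends:
    def __init__(self, name, backends):
        self._name = name
        self._backends = backends

    def __call__(self, *args, **kwargs):
        raise ImportError(
            f"{self._name} requires {' and '.join(self._backends)} to be installed."
        )

# ShapEPipeline would resolve to e.g. _RequiresBackends("ShapEPipeline", ["torch", "transformers"])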
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
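# --- Added inference sketch mirroring the integration tests above ---
# End-to-end instance segmentation with the public checkpoint the tests use.
# Assumes `torch`, `transformers` and network access; the post-processing call
# follows the upstream Mask2FormerImageProcessor API, so treat it as a sketch.
def _maskaformer_inference_demo(image):
    import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor

    ckpt = "facebook/mask2former-swin-small-coco-instance"
    processor = Mask2FormerImageProcessor.from_pretrained(ckpt)
    model = Mask2FormerForUniversalSegmentation.from_pretrained(ckpt).eval()
    inputs = processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # resize the predicted masks back to the original (height, width)
    result = processor.post_process_instance_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]
    return result["segmentation"], result["segments_info"]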
| 40
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : jnp.ndarray
lowerCAmelCase : jnp.ndarray
class lowerCAmelCase__ ( nn.Module ):
lowerCAmelCase : int
lowerCAmelCase : Tuple[int] = (16, 32, 96, 256)
lowerCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase : Tuple = []
for i in range(len(self.block_out_channels ) - 1 ):
_UpperCAmelCase : Union[str, Any] = self.block_out_channels[i]
_UpperCAmelCase : List[Any] = self.block_out_channels[i + 1]
_UpperCAmelCase : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
_UpperCAmelCase : Tuple = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = blocks
_UpperCAmelCase : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[Any] , lowerCamelCase__ : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.conv_in(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = nn.silu(lowerCamelCase__ )
for block in self.blocks:
_UpperCAmelCase : str = block(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = nn.silu(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = self.conv_out(lowerCamelCase__ )
return embedding
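# --- Added shape sketch for the conditioning embedder above (illustrative) ---
# The loop appends one stride-2 conv per channel transition, so the default
# block_out_channels (16, 32, 96, 256) yield three 2x downsamples: a 512x512
# conditioning image comes out at 64x64, matching the UNet latent resolution.
def _cond_embedding_output_hw(height, width, num_channel_blocks=4):
    downsamples = num_channel_blocks - 1  # one stride-2 conv per transition
    factor = 2 ** downsamples
    return height // factor, width // factor

assert _cond_embedding_output_hw(512, 512) == (64, 64)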
@flax_register_to_config
class lowerCAmelCase__ ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = 32
lowerCAmelCase : int = 4
lowerCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase : Union[bool, Tuple[bool]] = False
lowerCAmelCase : Tuple[int] = (320, 640, 1_280, 1_280)
lowerCAmelCase : int = 2
lowerCAmelCase : Union[int, Tuple[int]] = 8
lowerCAmelCase : Optional[Union[int, Tuple[int]]] = None
lowerCAmelCase : int = 1_280
lowerCAmelCase : float = 0.0
lowerCAmelCase : bool = False
lowerCAmelCase : jnp.dtype = jnp.floataa
lowerCAmelCase : bool = True
lowerCAmelCase : int = 0
lowerCAmelCase : str = "rgb"
lowerCAmelCase : Tuple[int] = (16, 32, 96, 256)
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : jax.random.KeyArray ) ->FrozenDict:
'''simple docstring'''
_UpperCAmelCase : str = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCAmelCase : Optional[Any] = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
_UpperCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCAmelCase : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCAmelCase : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
_UpperCAmelCase : int = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
_UpperCAmelCase : List[Any] = jax.random.split(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["params"]
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.block_out_channels
_UpperCAmelCase : List[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCAmelCase : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
_UpperCAmelCase : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCAmelCase : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCAmelCase : Optional[Any] = FlaxTimestepEmbedding(lowerCamelCase__ , dtype=self.dtype )
_UpperCAmelCase : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_UpperCAmelCase : int = self.only_cross_attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCAmelCase : Dict = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = block_out_channels[0]
_UpperCAmelCase : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCAmelCase : Dict = output_channel
_UpperCAmelCase : Optional[Any] = block_out_channels[i]
_UpperCAmelCase : int = i == len(lowerCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCAmelCase : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_UpperCAmelCase : Union[str, Any] = FlaxDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase__ )
for _ in range(self.layers_per_block ):
_UpperCAmelCase : Dict = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
if not is_final_block:
_UpperCAmelCase : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
_UpperCAmelCase : List[str] = down_blocks
_UpperCAmelCase : List[Any] = controlnet_down_blocks
# mid
_UpperCAmelCase : Tuple = block_out_channels[-1]
_UpperCAmelCase : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_UpperCAmelCase : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : float = 1.0 , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = False , ) ->Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_UpperCAmelCase : Optional[Any] = jnp.flip(lowerCamelCase__ , axis=1 )
# 1. time
if not isinstance(lowerCamelCase__ , jnp.ndarray ):
_UpperCAmelCase : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCAmelCase : List[str] = timesteps.astype(dtype=jnp.floataa )
_UpperCAmelCase : Optional[int] = jnp.expand_dims(lowerCamelCase__ , 0 )
_UpperCAmelCase : Any = self.time_proj(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.time_embedding(lowerCamelCase__ )
# 2. pre-process
_UpperCAmelCase : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
_UpperCAmelCase : Dict = self.conv_in(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
_UpperCAmelCase : Optional[Any] = self.controlnet_cond_embedding(lowerCamelCase__ )
sample += controlnet_cond
# 3. down
_UpperCAmelCase : int = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : str = down_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
else:
_UpperCAmelCase : List[str] = down_block(lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_UpperCAmelCase : Optional[int] = self.mid_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
# 5. controlnet blocks
_UpperCAmelCase : List[str] = ()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase__ , self.controlnet_down_blocks ):
_UpperCAmelCase : Dict = controlnet_block(lowerCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_UpperCAmelCase : Dict = controlnet_down_block_res_samples
_UpperCAmelCase : Dict = self.controlnet_mid_block(lowerCamelCase__ )
# 6. scaling
_UpperCAmelCase : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase__ , mid_block_res_sample=lowerCamelCase__ )
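# --- Added usage sketch (targets the upstream diffusers API, not this file) ---
# The module above mirrors diffusers' FlaxControlNetModel. A minimal forward
# pass with freshly initialised parameters; class/method names and shape
# conventions are assumptions based on the upstream library.
def _flax_controlnet_demo():
    import jax
    import jax.numpy as jnp
    from diffusers import FlaxControlNetModel  # requires `diffusers[flax]`

    controlnet = FlaxControlNetModel(sample_size=32, cross_attention_dim=1_280)
    params = controlnet.init_weights(jax.random.PRNGKey(0))
    sample = jnp.zeros((1, 4, 32, 32), dtype=jnp.float32)        # NCHW latents
    timesteps = jnp.ones((1,), dtype=jnp.int32)
    encoder_hidden_states = jnp.zeros((1, 1, 1_280), dtype=jnp.float32)
    cond_image = jnp.zeros((1, 3, 256, 256), dtype=jnp.float32)  # 8x the latents
    out = controlnet.apply(
        {"params": params}, sample, timesteps, encoder_hidden_states, cond_image
    )
    return out.mid_block_res_sample.shape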
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def __lowerCAmelCase (__lowerCAmelCase ):
return int(__lowerCAmelCase / 2**20 )
class lowerCAmelCase__ :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
_UpperCAmelCase : Tuple = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
_UpperCAmelCase : List[str] = torch.cuda.memory_allocated()
_UpperCAmelCase : Tuple = torch.cuda.max_memory_allocated()
_UpperCAmelCase : List[Any] = bamb(self.end - self.begin )
_UpperCAmelCase : int = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels', which is the name the models of the
# transformers library expect for labels
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
# We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
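# --- Added sketch of the optimizer selection logic above (illustrative) ---
# When the DeepSpeed config already specifies an optimizer, Accelerate expects
# the placeholder DummyOptim instead of a real torch optimizer; the same
# branch, factored into a helper (`_pick_optimizer_cls` is a hypothetical name):
def _pick_optimizer_cls(accelerator):
    from torch.optim import AdamW
    from accelerate.utils.deepspeed import DummyOptim

    plugin = accelerator.state.deepspeed_plugin
    if plugin is None or "optimizer" not in plugin.deepspeed_config:
        return AdamW  # no DeepSpeed-managed optimizer: use a real one
    return DummyOptim  # DeepSpeed builds the optimizer from its own config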
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Union[str, Any] = "upernet"
def __init__( self : int , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=5_12 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[Any]=[1, 2, 3, 6] , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[int]=0.4 , lowerCamelCase__ : Union[str, Any]=3_84 , lowerCamelCase__ : Optional[Any]=2_56 , lowerCamelCase__ : int=1 , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : int=2_55 , **lowerCamelCase__ : Dict , ) ->List[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase : Any = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : List[str] = backbone_config.get("model_type" )
_UpperCAmelCase : int = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase : str = config_class.from_dict(lowerCamelCase__ )
_UpperCAmelCase : List[str] = backbone_config
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : Union[str, Any] = initializer_range
_UpperCAmelCase : str = pool_scales
_UpperCAmelCase : List[str] = use_auxiliary_head
_UpperCAmelCase : Optional[int] = auxiliary_loss_weight
_UpperCAmelCase : Tuple = auxiliary_in_channels
_UpperCAmelCase : str = auxiliary_channels
_UpperCAmelCase : Tuple = auxiliary_num_convs
_UpperCAmelCase : Optional[int] = auxiliary_concat_input
_UpperCAmelCase : List[str] = loss_ignore_index
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = copy.deepcopy(self.__dict__ )
_UpperCAmelCase : List[str] = self.backbone_config.to_dict()
_UpperCAmelCase : Dict = self.__class__.model_type
return output
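# --- Added usage sketch (targets the upstream transformers API) ---
# The __init__ above resolves a backbone config from a dict or falls back to
# ResNet; this sketch uses the equivalent `transformers.UperNetConfig`, since
# the obfuscated body does not bind most kwargs to `self`. Names are
# assumptions based on the upstream library.
def _upernet_config_demo():
    from transformers import ConvNextConfig, UperNetConfig

    backbone = ConvNextConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
    config = UperNetConfig(backbone_config=backbone, hidden_size=512)
    assert config.hidden_size == 512
    # to_dict() serialises the nested backbone config as a plain dict
    return config.to_dict()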
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
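# --- Added toy demonstration of the key renaming above ---
# Applies the same substitutions to a few RWKV-style keys so the mapping is
# visible at a glance; self-contained, no model weights required.
def _rename_demo():
    import re

    samples = ["blocks.0.att.time_mix_k", "blocks.3.ffn.key.weight", "head.weight"]
    renamed = []
    for name in samples:
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        if name != "head.weight":
            name = "rwkv." + name
        renamed.append(name)
    return renamed

# _rename_demo() -> ['rwkv.blocks.0.attention.time_mix_key',
#                    'rwkv.blocks.3.feed_forward.key.weight', 'head.weight']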
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict).
print(
"Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry: the model has still been converted." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
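# --- Added sketch of the sharding step above (illustrative) ---
# `shard_checkpoint` splits a state dict into size-limited pieces and returns
# (shards, index); the index is None whenever a single shard suffices. The
# import path matches the one at the top of this file but may differ across
# transformers versions, so treat this as an assumption.
def _shard_demo():
    import torch
    from transformers.modeling_utils import shard_checkpoint

    state_dict = {"a": torch.zeros(4), "b": torch.zeros(4)}
    shards, index = shard_checkpoint(state_dict, max_shard_size="5MB")
    assert index is None and list(shards) == ["pytorch_model.bin"]
    return shards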
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 0
|
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __lowerCAmelCase (__lowerCAmelCase ):
return EnvironmentCommand()
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = parser.add_parser("env" )
download_parser.set_defaults(func=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = huggingface_hub.__version__
_UpperCAmelCase : Dict = "not installed"
_UpperCAmelCase : Union[str, Any] = "NA"
if is_torch_available():
import torch
_UpperCAmelCase : str = torch.__version__
_UpperCAmelCase : Union[str, Any] = torch.cuda.is_available()
_UpperCAmelCase : str = "not installed"
if is_transformers_available():
import transformers
_UpperCAmelCase : Tuple = transformers.__version__
_UpperCAmelCase : List[Any] = "not installed"
if is_accelerate_available():
import accelerate
_UpperCAmelCase : Any = accelerate.__version__
_UpperCAmelCase : Optional[Any] = "not installed"
if is_xformers_available():
import xformers
_UpperCAmelCase : Optional[Any] = xformers.__version__
_UpperCAmelCase : Any = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCamelCase__ ) )
return info
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 0
|
'''simple docstring'''
import itertools
import math
def __lowerCAmelCase (__lowerCAmelCase ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(__lowerCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
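# --- Added worked example for the 6k +/- 1 check above (illustrative) ---
# Trial division only needs candidates of the form 6k - 1 and 6k + 1 up to
# sqrt(n). For n = 97, sqrt(97) ~ 9.85, so the loop above tests i = 5 (and
# i + 2 = 7); neither divides 97, hence 97 is prime.
def _candidates_up_to_sqrt(n):
    import math

    return [i + d for i in range(5, int(math.sqrt(n)) + 1, 6) for d in (0, 2)]

assert _candidates_up_to_sqrt(97) == [5, 7]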
def __lowerCAmelCase ():
_UpperCAmelCase : Any = 2
while True:
if is_prime(__lowerCAmelCase ):
yield num
num += 1
def __lowerCAmelCase (__lowerCAmelCase = 10_001 ):
return next(itertools.islice(prime_generator() , __lowerCAmelCase - 1 , __lowerCAmelCase ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase__ :
def __init__( self : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int]=12 , lowerCamelCase__ : Union[str, Any]=7 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : Dict=99 , lowerCamelCase__ : str=32 , lowerCamelCase__ : List[str]=32 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : int=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : Optional[Any]=0.1 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : Tuple=5_12 , lowerCamelCase__ : List[Any]=0.0_2 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : List[Any] = batch_size
_UpperCAmelCase : str = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : str = use_input_mask
_UpperCAmelCase : Optional[Any] = use_labels
_UpperCAmelCase : Union[str, Any] = vocab_size
_UpperCAmelCase : int = hidden_size
_UpperCAmelCase : Optional[int] = projection_dim
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : Optional[Any] = dropout
_UpperCAmelCase : Optional[Any] = attention_dropout
_UpperCAmelCase : Tuple = max_position_embeddings
_UpperCAmelCase : int = initializer_range
_UpperCAmelCase : List[Any] = scope
_UpperCAmelCase : int = bos_token_id
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : str = None
if self.use_input_mask:
_UpperCAmelCase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCAmelCase : int = input_mask.numpy()
_UpperCAmelCase : List[Any] = input_mask.shape
_UpperCAmelCase : Dict = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCamelCase__ ):
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : List[Any] = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : str , lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = TFBlipTextModel(config=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , training=lowerCamelCase__ )
_UpperCAmelCase : Dict = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
_UpperCAmelCase : Tuple = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase : List[str] = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Optional[Any] = False
def lowerCAmelCase__ ( self : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = BlipTextModelTester(self )
_UpperCAmelCase : Any = ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCAmelCase__ ( self : str ) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : Tuple = TFBlipTextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Tuple=True ) ->Dict:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCamelCase__ )
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
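    # Layer-selection sketch (comments only, not executed): the loop above
    # copies teacher layers [0, 2, 4, 7, 9, 11] into consecutive student
    # slots, i.e.
    #   for std_idx, teacher_idx in enumerate([0, 2, 4, 7, 9, 11]):
    #       ...  # teacher layer `teacher_idx` -> student layer `std_idx`
    # so the 12-layer teacher is distilled into a 6-layer student.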
| 40
| 0
|
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : List[str] = "unispeech"
def __init__( self : List[str] , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : List[str]=7_68 , lowerCamelCase__ : str=12 , lowerCamelCase__ : int=12 , lowerCamelCase__ : int=30_72 , lowerCamelCase__ : Any="gelu" , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : List[Any]=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : str=0.0 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : Dict=0.1 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : List[str]=0.0_2 , lowerCamelCase__ : Tuple=1E-5 , lowerCamelCase__ : Optional[int]="group" , lowerCamelCase__ : List[str]="gelu" , lowerCamelCase__ : Optional[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase__ : Optional[int]=(5, 2, 2, 2, 2, 2, 2) , lowerCamelCase__ : List[Any]=(10, 3, 3, 3, 3, 2, 2) , lowerCamelCase__ : int=False , lowerCamelCase__ : Union[str, Any]=1_28 , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[Any]=0.0_5 , lowerCamelCase__ : Dict=10 , lowerCamelCase__ : Union[str, Any]=2 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : Optional[Any]=0 , lowerCamelCase__ : str=3_20 , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Optional[int]=1_00 , lowerCamelCase__ : Optional[Any]=2_56 , lowerCamelCase__ : Any=2_56 , lowerCamelCase__ : Any=0.1 , lowerCamelCase__ : str="mean" , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : str=2_56 , lowerCamelCase__ : List[str]=80 , lowerCamelCase__ : Tuple=0 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : str=0.5 , **lowerCamelCase__ : Union[str, Any] , ) ->Any:
'''simple docstring'''
super().__init__(**lowerCamelCase__ , pad_token_id=lowerCamelCase__ , bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ )
_UpperCAmelCase : Any = hidden_size
_UpperCAmelCase : Any = feat_extract_norm
_UpperCAmelCase : Optional[int] = feat_extract_activation
_UpperCAmelCase : Tuple = list(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Tuple = list(lowerCamelCase__ )
_UpperCAmelCase : List[str] = conv_bias
_UpperCAmelCase : int = num_conv_pos_embeddings
_UpperCAmelCase : Dict = num_conv_pos_embedding_groups
_UpperCAmelCase : Union[str, Any] = len(self.conv_dim )
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Dict = hidden_dropout
_UpperCAmelCase : List[str] = attention_dropout
_UpperCAmelCase : int = activation_dropout
_UpperCAmelCase : Union[str, Any] = feat_proj_dropout
_UpperCAmelCase : List[Any] = final_dropout
_UpperCAmelCase : List[str] = layerdrop
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : int = num_ctc_classes
_UpperCAmelCase : Tuple = vocab_size
_UpperCAmelCase : List[str] = do_stable_layer_norm
_UpperCAmelCase : Optional[Any] = use_weighted_layer_sum
_UpperCAmelCase : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase : List[str] = apply_spec_augment
_UpperCAmelCase : int = mask_time_prob
_UpperCAmelCase : Union[str, Any] = mask_time_length
_UpperCAmelCase : str = mask_time_min_masks
_UpperCAmelCase : Dict = mask_feature_prob
_UpperCAmelCase : Any = mask_feature_length
_UpperCAmelCase : Union[str, Any] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase : List[str] = num_codevectors_per_group
_UpperCAmelCase : str = num_codevector_groups
_UpperCAmelCase : Optional[Any] = contrastive_logits_temperature
_UpperCAmelCase : Union[str, Any] = feat_quantizer_dropout
_UpperCAmelCase : Any = num_negatives
_UpperCAmelCase : str = codevector_dim
_UpperCAmelCase : List[str] = proj_codevector_dim
_UpperCAmelCase : Dict = diversity_loss_weight
# ctc loss
_UpperCAmelCase : Optional[int] = ctc_loss_reduction
_UpperCAmelCase : Tuple = ctc_zero_infinity
# pretraining loss
_UpperCAmelCase : List[str] = replace_prob
@property
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
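# A hedged, standalone sketch of what the property above returns with the
# default strides from `__init__`: the product of the convolutional strides,
# i.e. how many raw audio samples collapse into one extracted feature frame.
if __name__ == "__main__":
    import functools
    import operator

    conv_stride = (5, 2, 2, 2, 2, 2, 2)
    print(functools.reduce(operator.mul, conv_stride, 1))  # 320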
| 709
|
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
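    # Expected behaviour of the calls above, traced by hand for this graph:
    # shortest_path('D') follows the parent map backwards (D -> B -> A -> C -> G)
    # and prints "G->C->A->B->D"; shortest_path('G') prints "G"; the final call
    # raises ValueError because "Foo" is never reached by the search.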
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
if len(__lowerCAmelCase ) <= 1:
return lst
_UpperCAmelCase : Optional[int] = 1
while i < len(__lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
return lst
if __name__ == "__main__":
lowerCamelCase__ = input('Enter numbers separated by a comma:\n').strip()
lowerCamelCase__ = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted))
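    # Hand-traced sketch of the swap-and-step-back behaviour (with the swap
    # fixed above): [3, 1, 2] -> swap to [1, 3, 2] and step back to the front,
    # advance, swap to [1, 2, 3], step back, then walk forward to completion.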
| 710
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
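# A hedged usage sketch (the class name and checkpoint below are assumptions,
# and the snippet is left as comments because it needs downloaded weights):
#
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   both = processor(images=image, text="a photo of", return_tensors="pt")
#   text_only = processor(text="a photo of", return_tensors="pt")   # tokenizer-only path
#   image_only = processor(images=image, return_tensors="pt")       # no text encoding attached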
| 40
| 0
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_ad import UNetaDModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : List[Any] , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : UNetaDModel , lowerCamelCase__ : DDPMScheduler , lowerCamelCase__ : Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__()
_UpperCAmelCase : Dict = value_function
_UpperCAmelCase : int = unet
_UpperCAmelCase : Dict = scheduler
_UpperCAmelCase : Optional[int] = env
_UpperCAmelCase : int = env.get_dataset()
_UpperCAmelCase : List[str] = {}
for key in self.data.keys():
try:
_UpperCAmelCase : Optional[Any] = self.data[key].mean()
except: # noqa: E722
pass
_UpperCAmelCase : List[Any] = {}
for key in self.data.keys():
try:
_UpperCAmelCase : Optional[int] = self.data[key].std()
except: # noqa: E722
pass
_UpperCAmelCase : Union[str, Any] = env.observation_space.shape[0]
_UpperCAmelCase : Tuple = env.action_space.shape[0]
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
return (x_in - self.means[key]) / self.stds[key]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] ) ->str:
'''simple docstring'''
return x_in * self.stds[key] + self.means[key]
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Union[str, Any] ) ->int:
'''simple docstring'''
if type(lowerCamelCase__ ) is dict:
return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()}
elif torch.is_tensor(lowerCamelCase__ ):
return x_in.to(self.unet.device )
return torch.tensor(lowerCamelCase__ , device=self.unet.device )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int , lowerCamelCase__ : Dict ) ->Optional[int]:
'''simple docstring'''
for key, val in cond.items():
_UpperCAmelCase : str = val.clone()
return x_in
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = x.shape[0]
_UpperCAmelCase : Tuple = None
for i in tqdm.tqdm(self.scheduler.timesteps ):
# create batch of timesteps to pass into model
_UpperCAmelCase : str = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long )
for _ in range(lowerCamelCase__ ):
with torch.enable_grad():
x.requires_grad_()
# permute to match dimension for pre-trained models
_UpperCAmelCase : Optional[int] = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample
_UpperCAmelCase : Optional[int] = torch.autograd.grad([y.sum()] , [x] )[0]
_UpperCAmelCase : List[str] = self.scheduler._get_variance(lowerCamelCase__ )
_UpperCAmelCase : str = torch.exp(0.5 * posterior_variance )
_UpperCAmelCase : str = model_std * grad
_UpperCAmelCase : str = 0
_UpperCAmelCase : Union[str, Any] = x.detach()
_UpperCAmelCase : Optional[Any] = x + scale * grad
_UpperCAmelCase : Union[str, Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : int = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 )
# TODO: verify deprecation of this kwarg
_UpperCAmelCase : Tuple = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["prev_sample"]
# apply conditions to the trajectory (set the initial state)
_UpperCAmelCase : Union[str, Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : Optional[int] = self.to_torch(lowerCamelCase__ )
return x, y
def __call__( self : Dict , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]=64 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : Tuple=0.1 ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = self.normalize(lowerCamelCase__ , "observations" )
_UpperCAmelCase : str = obs[None].repeat(lowerCamelCase__ , axis=0 )
_UpperCAmelCase : List[str] = {0: self.to_torch(lowerCamelCase__ )}
_UpperCAmelCase : Union[str, Any] = (batch_size, planning_horizon, self.state_dim + self.action_dim)
# generate initial noise and apply our conditions (to make the trajectories start at current state)
_UpperCAmelCase : Tuple = randn_tensor(lowerCamelCase__ , device=self.unet.device )
_UpperCAmelCase : Any = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim )
_UpperCAmelCase : Optional[int] = self.to_torch(lowerCamelCase__ )
# run the diffusion process
_UpperCAmelCase : Any = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# sort output trajectories by value
_UpperCAmelCase : Any = y.argsort(0 , descending=lowerCamelCase__ ).squeeze()
_UpperCAmelCase : Any = x[sorted_idx]
_UpperCAmelCase : Any = sorted_values[:, :, : self.action_dim]
_UpperCAmelCase : str = actions.detach().cpu().numpy()
_UpperCAmelCase : Optional[int] = self.de_normalize(lowerCamelCase__ , key="actions" )
# select the action with the highest value
if y is not None:
_UpperCAmelCase : Tuple = 0
else:
# if we didn't run value guiding, select a random action
_UpperCAmelCase : List[Any] = np.random.randint(0 , lowerCamelCase__ )
_UpperCAmelCase : List[str] = denorm_actions[selected_index, 0]
return denorm_actions
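# A hedged, standalone sketch of the gradient-guidance step inside
# `run_diffusion` above (the shapes, stand-in value function and scale are
# illustrative assumptions): the sample is nudged along the value model's
# gradient before the regular denoising step.
if __name__ == "__main__":
    import torch

    x = torch.zeros(2, 4, requires_grad=True)
    y = (x * 3.0).sum()                      # stand-in for the value model
    grad = torch.autograd.grad([y], [x])[0]  # d(value) / d(sample)
    scale = 0.1
    x = x.detach() + scale * grad            # guided update, as in the loop above
    print(x)                                 # every entry becomes 0.3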
| 711
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ): # noqa: E741
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = [0] * n
_UpperCAmelCase : int = [False] * n
_UpperCAmelCase : Dict = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase : List[str] = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase : Dict = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase : Dict = True
else:
_UpperCAmelCase : Optional[int] = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
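# Expected result for the sample graph above, traced by hand: removing
# vertex 2 separates {0, 1} from {3, 4} and {5, 6, 7, 8}, removing 3
# isolates 4, and removing 5 cuts off {6, 7, 8}; so 2, 3 and 5 are the
# articulation points the DFS low-link computation should report.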
| 40
| 0
|
'''simple docstring'''
lowerCamelCase__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCamelCase__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
lowerCamelCase__ = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
assert len(str(__lowerCAmelCase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 to 12"
assert 1 <= day <= 31, "day should be between 1 to 31"
# Doomsday algorithm:
_UpperCAmelCase : List[Any] = year // 100
_UpperCAmelCase : Dict = (5 * (century % 4) + 2) % 7
_UpperCAmelCase : int = year % 100
_UpperCAmelCase : Dict = centurian % 12
_UpperCAmelCase : Union[str, Any] = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_UpperCAmelCase : int = (
DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_UpperCAmelCase : Optional[Any] = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
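    # Hedged sanity check of the fixed leap-year condition (comments only):
    # 2000 is a leap year because it is divisible by 400, so the Doomsday
    # anchor for January is the 4th, and 1 January 2000 falls on a Saturday.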
| 712
|
'''simple docstring'''
def __lowerCAmelCase ():
_UpperCAmelCase : str = 0
for i in range(1 , 1_001 ):
total += i**i
return str(__lowerCAmelCase )[-10:]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = ShapEPipeline
lowerCAmelCase : List[Any] = ["prompt"]
lowerCAmelCase : Union[str, Any] = ["prompt"]
lowerCAmelCase : str = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
lowerCAmelCase : Optional[Any] = False
@property
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
return 32
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
return 8
@property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : Union[str, Any] = {
"num_attention_heads": 2,
"attention_head_dim": 16,
"embedding_dim": self.time_input_dim,
"num_embeddings": 32,
"embedding_proj_dim": self.text_embedder_hidden_size,
"time_embed_dim": self.time_embed_dim,
"num_layers": 1,
"clip_embed_dim": self.time_input_dim * 2,
"additional_embeddings": 0,
"time_embed_act_fn": "gelu",
"norm_in_type": "layer",
"encoder_hid_proj_type": None,
"added_emb_type": None,
}
_UpperCAmelCase : int = PriorTransformer(**lowerCamelCase__ )
return model
@property
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase : str = {
"param_shapes": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"d_latent": self.time_input_dim,
"d_hidden": self.renderer_dim,
"n_output": 12,
"background": (
0.1,
0.1,
0.1,
),
}
_UpperCAmelCase : int = ShapERenderer(**lowerCamelCase__ )
return model
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = self.dummy_prior
_UpperCAmelCase : Optional[Any] = self.dummy_text_encoder
_UpperCAmelCase : Optional[Any] = self.dummy_tokenizer
_UpperCAmelCase : Dict = self.dummy_renderer
_UpperCAmelCase : Tuple = HeunDiscreteScheduler(
beta_schedule="exp" , num_train_timesteps=10_24 , prediction_type="sample" , use_karras_sigmas=lowerCamelCase__ , clip_sample=lowerCamelCase__ , clip_sample_range=1.0 , )
_UpperCAmelCase : Optional[int] = {
"prior": prior,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"renderer": renderer,
"scheduler": scheduler,
}
return components
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str]=0 ) ->str:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith("mps" ):
_UpperCAmelCase : Dict = torch.manual_seed(lowerCamelCase__ )
else:
_UpperCAmelCase : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
_UpperCAmelCase : Any = {
"prompt": "horse",
"generator": generator,
"num_inference_steps": 1,
"frame_size": 32,
"output_type": "np",
}
return inputs
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = "cpu"
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = self.pipeline_class(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
_UpperCAmelCase : str = output.images[0]
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
_UpperCAmelCase : Optional[Any] = np.array(
[
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
0.0_0_0_3_9_2_1_6,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = torch_device == "cpu"
_UpperCAmelCase : int = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCamelCase__ , relax_max_difference=lowerCamelCase__ , )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : Union[str, Any] = self.pipeline_class(**lowerCamelCase__ )
_UpperCAmelCase : str = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : Union[str, Any] = 2
_UpperCAmelCase : int = self.get_dummy_inputs(lowerCamelCase__ )
for key in inputs.keys():
if key in self.batch_params:
_UpperCAmelCase : Any = batch_size * [inputs[key]]
_UpperCAmelCase : str = pipe(**lowerCamelCase__ , num_images_per_prompt=lowerCamelCase__ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/shap_e/test_shap_e_np_out.npy" )
_UpperCAmelCase : Optional[Any] = ShapEPipeline.from_pretrained("openai/shap-e" )
_UpperCAmelCase : Dict = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
_UpperCAmelCase : Dict = torch.Generator(device=lowerCamelCase__ ).manual_seed(0 )
_UpperCAmelCase : Optional[Any] = pipe(
"a shark" , generator=lowerCamelCase__ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="np" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCamelCase__ , lowerCamelCase__ )
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
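    # Hedged worked example (comments only; the toy vectors are illustrative):
    # cosine_similarity([1, 0], [2, 0]) = 1.0 for parallel vectors, while
    # cosine_similarity([1, 0], [0, 3]) = 0.0 for orthogonal ones, since the
    # dot product is normalised by the product of the two vector norms.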
| 40
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
lowerCamelCase__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
lowerCamelCase__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
lowerCamelCase__ = '▁'
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = VOCAB_FILES_NAMES
lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : List[str] = BigBirdTokenizer
lowerCAmelCase : str = ["input_ids", "attention_mask"]
lowerCAmelCase : List[int] = []
def __init__( self : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Optional[Any]="<unk>" , lowerCamelCase__ : int="<s>" , lowerCamelCase__ : str="</s>" , lowerCamelCase__ : Optional[Any]="<pad>" , lowerCamelCase__ : Tuple="[SEP]" , lowerCamelCase__ : Union[str, Any]="[MASK]" , lowerCamelCase__ : str="[CLS]" , **lowerCamelCase__ : Optional[int] , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Dict = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
_UpperCAmelCase : Optional[Any] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
_UpperCAmelCase : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
_UpperCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
_UpperCAmelCase : Tuple = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
_UpperCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase : Any = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
lowerCamelCase__ , tokenizer_file=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , **lowerCamelCase__ , )
_UpperCAmelCase : Optional[int] = vocab_file
_UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None , lowerCamelCase__ : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[int] , lowerCamelCase__ : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = [self.sep_token_id]
_UpperCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(lowerCamelCase__ ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase : str = os.path.join(
lowerCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ):
copyfile(self.vocab_file , lowerCamelCase__ )
return (out_vocab_file,)
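# A hedged illustration of the special-token layout built above (the ids are
# assumptions for this checkpoint family): with cls_token_id = 65 and
# sep_token_id = 66, a single sequence [10, 11] becomes [65, 10, 11, 66] with
# special-tokens mask [1, 0, 0, 1]; a pair appends the second sequence
# followed by one more [SEP].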
| 714
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
| 40
| 0
|
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
lowerCamelCase__ = models.Sequential()
# Step 1 - Convolution
# Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
# (3,3) is the kernel size (filter matrix)
classifier.add(
layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
)
# Step 2 - Pooling
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Adding a second convolutional layer
classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
# Step 3 - Flattening
classifier.add(layers.Flatten())
# Step 4 - Full connection
classifier.add(layers.Dense(units=128, activation='relu'))
classifier.add(layers.Dense(units=1, activation='sigmoid'))
# Compiling the CNN
classifier.compile(
optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
)
# Part 2 - Fitting the CNN to the images
# Load Trained model weights
# from keras.models import load_model
# regressor=load_model('cnn.h5')
lowerCamelCase__ = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
lowerCamelCase__ = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
lowerCamelCase__ = train_datagen.flow_from_directory(
'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
lowerCamelCase__ = test_datagen.flow_from_directory(
'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
)
classifier.fit_generator(
training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
)
classifier.save('cnn.h5')
# Part 3 - Making new predictions
lowerCamelCase__ = tf.keras.preprocessing.image.load_img(
'dataset/single_prediction/image.png', target_size=(64, 64)
)
lowerCamelCase__ = tf.keras.preprocessing.image.img_to_array(test_image)
lowerCamelCase__ = np.expand_dims(test_image, axis=0)
lowerCamelCase__ = classifier.predict(test_image)
# training_set.class_indices
# A sigmoid output is a probability in (0, 1), so threshold it at 0.5 instead
# of comparing it with exact 0 or 1.
if result[0][0] < 0.5:
    lowerCamelCase__ = 'Normal'
else:
    lowerCamelCase__ = 'Abnormality detected'
| 715
|
'''simple docstring'''
from math import factorial
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = real
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Any = [1] * rank
else:
_UpperCAmelCase : Dict = rank
def __repr__( self : str ) ->List[str]:
'''simple docstring'''
return (
F"""{self.real}+"""
F"""{'+'.join(str(lowerCamelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCamelCase__ )
def __add__( self : Dict , lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
return Dual(self.real + other , self.duals )
_UpperCAmelCase : Optional[int] = self.duals.copy()
_UpperCAmelCase : Optional[int] = other.duals.copy()
if len(lowerCamelCase__ ) > len(lowerCamelCase__ ):
o_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
elif len(lowerCamelCase__ ) < len(lowerCamelCase__ ):
s_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
_UpperCAmelCase : Union[str, Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCamelCase__ )
lowerCAmelCase : Tuple = __add__
def __sub__( self : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
return self + other * -1
def __mul__( self : List[str] , lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = __mul__
def __truediv__( self : Optional[Any] , lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCamelCase__ )
raise ValueError
def __floordiv__( self : str , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCamelCase__ )
raise ValueError
def __pow__( self : Tuple , lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
if n < 0 or isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
_UpperCAmelCase : str = self
for _ in range(n - 1 ):
x *= self
return x
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not callable(__lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(__lowerCAmelCase , __lowerCAmelCase ):
raise ValueError("differentiate() requires an int as input for order" )
_UpperCAmelCase : int = Dual(__lowerCAmelCase , 1 )
_UpperCAmelCase : Optional[int] = func(__lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))  # second derivative of y**6 at y = 9: 30 * 9**4 = 196830
| 40
| 0
|
'''simple docstring'''
import math
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if (
not isinstance(__lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * power_factor
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if (
not isinstance(__lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1." )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
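# Hedged worked example for the two helpers above (real and reactive power);
# the variable names below are illustrative. With apparent power S = 100 VA
# and power factor 0.8: P = S * pf = 80.0 W, and
# Q = S * sqrt(1 - pf**2) = 100 * 0.6 = 60.0 VAR.
_apparent_power, _power_factor = 100.0, 0.8
assert _apparent_power * _power_factor == 80.0
assert math.isclose(_apparent_power * math.sqrt(1 - _power_factor**2), 60.0)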
| 716
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase )
| 40
| 0
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCamelCase__ = datasets.utils.logging.get_logger(__name__)
class lowerCAmelCase__ ( folder_based_builder.FolderBasedBuilderConfig ):
lowerCAmelCase : bool = None
lowerCAmelCase : bool = None
class lowerCAmelCase__ ( folder_based_builder.FolderBasedBuilder ):
lowerCAmelCase : Dict = datasets.Audio()
lowerCAmelCase : int = "audio"
lowerCAmelCase : str = AudioFolderConfig
lowerCAmelCase : List[str] # definition at the bottom of the script
lowerCAmelCase : Any = AudioClassification(audio_column="audio" , label_column="label" )
lowerCamelCase__ = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
lowerCamelCase__ = AUDIO_EXTENSIONS
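# Hedged usage sketch: this builder backs the generic "audiofolder" loader in
# 🤗 Datasets, which infers class labels from directory names for files with
# one of the extensions above. The directory layout below is an illustrative
# assumption, not part of this module.
def _audiofolder_usage_sketch():
    from datasets import load_dataset

    # e.g. data/dog/bark.wav and data/cat/meow.wav -> labels "dog" / "cat"
    return load_dataset("audiofolder", data_dir="data")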
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 40
| 0
|
'''simple docstring'''
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = ['model.decoder.embed_positions.weights']
def __lowerCAmelCase (__lowerCAmelCase ):
if "emb" in name:
_UpperCAmelCase : List[str] = name.replace("emb" , "model.decoder.embed_tokens" )
if "transformer" in name:
_UpperCAmelCase : List[str] = name.replace("transformer" , "model.decoder" )
if "cross_attention" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("cross_attention" , "encoder_attn" )
if "linear1" in name:
_UpperCAmelCase : Tuple = name.replace("linear1" , "fc1" )
if "linear2" in name:
_UpperCAmelCase : Optional[Any] = name.replace("linear2" , "fc2" )
if "norm1" in name:
_UpperCAmelCase : List[Any] = name.replace("norm1" , "self_attn_layer_norm" )
if "norm_cross" in name:
_UpperCAmelCase : int = name.replace("norm_cross" , "encoder_attn_layer_norm" )
if "norm2" in name:
_UpperCAmelCase : Union[str, Any] = name.replace("norm2" , "final_layer_norm" )
if "out_norm" in name:
_UpperCAmelCase : Optional[int] = name.replace("out_norm" , "model.decoder.layer_norm" )
if "linears" in name:
_UpperCAmelCase : Any = name.replace("linears" , "lm_heads" )
if "condition_provider.conditioners.description.output_proj" in name:
_UpperCAmelCase : Any = name.replace("condition_provider.conditioners.description.output_proj" , "enc_to_dec_proj" )
return name
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : str = list(state_dict.keys() )
_UpperCAmelCase : Tuple = {}
for key in keys:
_UpperCAmelCase : List[Any] = state_dict.pop(__lowerCAmelCase )
_UpperCAmelCase : Tuple = rename_keys(__lowerCAmelCase )
if "in_proj_weight" in key:
# split fused qkv proj
_UpperCAmelCase : Any = val[:hidden_size, :]
_UpperCAmelCase : Union[str, Any] = val[hidden_size : 2 * hidden_size, :]
_UpperCAmelCase : Optional[int] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
_UpperCAmelCase : Optional[Any] = val
else:
_UpperCAmelCase : Dict = val
return state_dict, enc_dec_proj_state_dict
def __lowerCAmelCase (__lowerCAmelCase ):
if checkpoint == "small":
# default config values
_UpperCAmelCase : Any = 1_024
_UpperCAmelCase : Tuple = 24
_UpperCAmelCase : Union[str, Any] = 16
elif checkpoint == "medium":
_UpperCAmelCase : List[Any] = 1_536
_UpperCAmelCase : str = 48
_UpperCAmelCase : str = 24
elif checkpoint == "large":
_UpperCAmelCase : Dict = 2_048
_UpperCAmelCase : Optional[int] = 48
_UpperCAmelCase : Any = 32
else:
raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" )
_UpperCAmelCase : Optional[int] = MusicgenDecoderConfig(
hidden_size=__lowerCAmelCase , ffn_dim=hidden_size * 4 , num_hidden_layers=__lowerCAmelCase , num_attention_heads=__lowerCAmelCase , )
return config
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="cpu" ):
_UpperCAmelCase : str = MusicGen.get_pretrained(__lowerCAmelCase , device=__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = decoder_config_from_checkpoint(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = fairseq_model.lm.state_dict()
_UpperCAmelCase : Any = rename_state_dict(
__lowerCAmelCase , hidden_size=decoder_config.hidden_size )
_UpperCAmelCase : Tuple = TaEncoderModel.from_pretrained("t5-base" )
_UpperCAmelCase : int = EncodecModel.from_pretrained("facebook/encodec_32khz" )
_UpperCAmelCase : Tuple = MusicgenForCausalLM(__lowerCAmelCase ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
_UpperCAmelCase : Any = decoder.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
for key in missing_keys.copy():
if key.startswith(("text_encoder", "audio_encoder") ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" )
if len(__lowerCAmelCase ) > 0:
raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" )
# init the composite model
_UpperCAmelCase : Tuple = MusicgenForConditionalGeneration(text_encoder=__lowerCAmelCase , audio_encoder=__lowerCAmelCase , decoder=__lowerCAmelCase )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__lowerCAmelCase )
# check we can do a forward pass
_UpperCAmelCase : Any = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
_UpperCAmelCase : Tuple = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
_UpperCAmelCase : Any = model(input_ids=__lowerCAmelCase , decoder_input_ids=__lowerCAmelCase ).logits
if logits.shape != (8, 1, 2_048):
raise ValueError("Incorrect shape for logits" )
# now construct the processor
_UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("t5-base" )
_UpperCAmelCase : List[str] = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz" , padding_side="left" )
_UpperCAmelCase : Optional[int] = MusicgenProcessor(feature_extractor=__lowerCAmelCase , tokenizer=__lowerCAmelCase )
# set the appropriate bos/pad token ids
_UpperCAmelCase : Dict = 2_048
_UpperCAmelCase : List[str] = 2_048
# set other default generation config params
_UpperCAmelCase : Dict = int(30 * audio_encoder.config.frame_rate )
_UpperCAmelCase : Any = True
_UpperCAmelCase : Dict = 3.0
if pytorch_dump_folder is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if repo_id:
logger.info(F"""Pushing model {checkpoint} to {repo_id}""" )
model.push_to_hub(__lowerCAmelCase )
processor.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint',
default='small',
type=str,
help='Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.',
)
parser.add_argument(
'--pytorch_dump_folder',
required=True,
default=None,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
parser.add_argument(
'--device', default='cpu', type=str, help='Torch device to run the conversion, either cpu or cuda.'
)
lowerCamelCase__ = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
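# Hedged, self-contained check of the renaming logic above: a standalone
# mirror of a subset of the substitutions, applied in the same order, to
# illustrative fairseq-style keys (not taken from a real checkpoint).
def _rename_sketch(name):
    for old, new in [
        ("emb", "model.decoder.embed_tokens"),
        ("transformer", "model.decoder"),
        ("cross_attention", "encoder_attn"),
        ("linear1", "fc1"),
        ("linear2", "fc2"),
    ]:
        if old in name:
            name = name.replace(old, new)
    return name

assert _rename_sketch("emb.weight") == "model.decoder.embed_tokens.weight"
assert _rename_sketch("transformer.layers.0.linear1.weight") == "model.decoder.layers.0.fc1.weight"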
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 40
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
lowerCamelCase__ = 500_000
lowerCamelCase__ , lowerCamelCase__ = os.path.split(__file__)
lowerCamelCase__ = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def __lowerCAmelCase (__lowerCAmelCase , **__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = dataset.map(**__lowerCAmelCase )
@get_duration
def __lowerCAmelCase (__lowerCAmelCase , **__lowerCAmelCase ):
_UpperCAmelCase : Dict = dataset.filter(**__lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : str = {"num examples": SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Tuple = datasets.Features({"text": datasets.Value("string" ), "numbers": datasets.Value("float32" )} )
_UpperCAmelCase : Optional[Any] = generate_example_dataset(
os.path.join(__lowerCAmelCase , "dataset.arrow" ) , __lowerCAmelCase , num_examples=__lowerCAmelCase )
_UpperCAmelCase : Tuple = transformers.AutoTokenizer.from_pretrained("bert-base-cased" , use_fast=__lowerCAmelCase )
def tokenize(__lowerCAmelCase ):
return tokenizer(examples["text"] )
_UpperCAmelCase : List[Any] = map(__lowerCAmelCase )
_UpperCAmelCase : List[Any] = map(__lowerCAmelCase , batched=__lowerCAmelCase )
_UpperCAmelCase : str = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="numpy" ):
_UpperCAmelCase : Optional[Any] = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="pandas" ):
_UpperCAmelCase : Dict = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="torch" , columns="numbers" ):
_UpperCAmelCase : Optional[Any] = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
with dataset.formatted_as(type="tensorflow" , columns="numbers" ):
_UpperCAmelCase : Tuple = map(__lowerCAmelCase , function=lambda __lowerCAmelCase : None , batched=__lowerCAmelCase )
_UpperCAmelCase : Tuple = map(__lowerCAmelCase , function=__lowerCAmelCase , batched=__lowerCAmelCase )
_UpperCAmelCase : Dict = filter(__lowerCAmelCase )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(__lowerCAmelCase , "wb" ) as f:
f.write(json.dumps(__lowerCAmelCase ).encode("utf-8" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
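# The @get_duration decorator comes from a local utils module that is not
# shown here; a minimal sketch of what it plausibly does (an assumption, not
# the actual implementation) is:
import time
from functools import wraps

def _get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # elapsed seconds, as stored above
    return wrapper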
| 719
|
'''simple docstring'''
import os
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , "num.txt" )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def __lowerCAmelCase (__lowerCAmelCase ):
warnings.warn(
"The preprocess method is deprecated and will be removed in a future version. Please"
" use VaeImageProcessor.preprocess instead" , __lowerCAmelCase , )
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : Union[str, Any] = [image]
if isinstance(image[0] , PIL.Image.Image ):
_UpperCAmelCase : List[Any] = image[0].size
_UpperCAmelCase : str = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_UpperCAmelCase : int = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
_UpperCAmelCase : List[Any] = np.concatenate(__lowerCAmelCase , axis=0 )
_UpperCAmelCase : str = np.array(__lowerCAmelCase ).astype(np.floataa ) / 255.0
_UpperCAmelCase : Optional[Any] = image.transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase : Optional[int] = 2.0 * image - 1.0
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(__lowerCAmelCase )
elif isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase : Union[str, Any] = torch.cat(__lowerCAmelCase , dim=0 )
return image
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return mask
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : Optional[int] = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_UpperCAmelCase : str = mask[0].size
_UpperCAmelCase : str = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase : Tuple = [np.array(m.convert("L" ).resize((w, h) , resample=PIL_INTERPOLATION["nearest"] ) )[None, :] for m in mask]
_UpperCAmelCase : int = np.concatenate(__lowerCAmelCase , axis=0 )
_UpperCAmelCase : Optional[int] = mask.astype(np.floataa ) / 255.0
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : List[Any] = 1
_UpperCAmelCase : List[Any] = torch.from_numpy(__lowerCAmelCase )
elif isinstance(mask[0] , torch.Tensor ):
_UpperCAmelCase : Tuple = torch.cat(__lowerCAmelCase , dim=0 )
return mask
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : UNetaDModel
lowerCAmelCase : RePaintScheduler
def __init__( self : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
@torch.no_grad()
def __call__( self : Union[str, Any] , lowerCamelCase__ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase__ : Union[torch.Tensor, PIL.Image.Image] , lowerCamelCase__ : int = 2_50 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 10 , lowerCamelCase__ : int = 10 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = image
_UpperCAmelCase : Tuple = _preprocess_image(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = original_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase : int = _preprocess_mask(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = mask_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase : Union[str, Any] = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : int = original_image.shape
_UpperCAmelCase : List[str] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : str = eta
_UpperCAmelCase : Optional[int] = self.scheduler.timesteps[0] + 1
_UpperCAmelCase : Optional[int] = generator[0] if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_UpperCAmelCase : Any = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# compute previous image: x_t -> x_t-1
_UpperCAmelCase : Optional[int] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_UpperCAmelCase : Union[str, Any] = self.scheduler.undo_step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = t
_UpperCAmelCase : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : Union[str, Any] = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
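# Hedged usage sketch for the pipeline above; the checkpoint id follows the
# diffusers RePaint example and should be treated as an assumption.
def _repaint_usage_sketch(original_image, mask_image):
    from diffusers import RePaintPipeline, RePaintScheduler

    scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
    pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
    # original_image and mask_image are same-sized PIL images; the mask is
    # binarized by _preprocess_mask above to select kept vs. inpainted pixels.
    return pipe(image=original_image, mask_image=mask_image, num_inference_steps=250).images[0]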
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented-out code
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
    # note: args.batch_size is actually the number of return sequences per prompt; the dataloader batch size is 1
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
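# Hedged, self-contained illustration of the post-processing above:
# remove_last_block splits a generation on the EOF strings and drops
# everything from the last stop sequence onward (the sample completion and
# variable names below are made up for illustration).
_eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
_sample = "    return a + b\ndef next_function():"
_parts = re.split("(%s)" % "|".join(_eof_strings), _sample)
# _parts == ['    return a + b', '\ndef', ' next_function():']
assert "".join(_parts[:-2]) == "    return a + b"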
| 40
| 0
|
'''simple docstring'''
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
lowerCamelCase__ = '<<<<<<< This should probably be modified because it mentions: '
lowerCamelCase__ = '=======\n>>>>>>>\n'
lowerCamelCase__ = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
lowerCamelCase__ = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def __lowerCAmelCase (__lowerCAmelCase ):
return ConvertCommand(args.tfds_path , args.datasets_directory )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : ArgumentParser ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=lowerCamelCase__ , required=lowerCamelCase__ , help="Path to the HuggingFace Datasets folder." )
train_parser.set_defaults(func=lowerCamelCase__ )
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str , *lowerCamelCase__ : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = get_logger("datasets-cli/converting" )
_UpperCAmelCase : List[str] = tfds_path
_UpperCAmelCase : str = datasets_directory
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase : Optional[Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
_UpperCAmelCase : List[Any] = os.path.dirname(self._tfds_path )
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path." )
_UpperCAmelCase : Tuple = os.path.abspath(self._datasets_directory )
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""" )
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : int = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase : Dict = os.listdir(lowerCamelCase__ )
else:
_UpperCAmelCase : Union[str, Any] = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""" )
_UpperCAmelCase : int = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : str = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
if not os.path.isfile(lowerCamelCase__ ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file" )
continue
with open(lowerCamelCase__ , encoding="utf-8" ) as f:
_UpperCAmelCase : Any = f.readlines()
_UpperCAmelCase : List[Any] = []
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Union[str, Any] = False
_UpperCAmelCase : Any = []
for line in lines:
_UpperCAmelCase : str = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase : List[Any] = "import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase : Tuple = ""
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase : Optional[Any] = "from datasets import logging\n"
elif "getLogger" in out_line:
_UpperCAmelCase : str = out_line.replace("getLogger" , "get_logger" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase : int = True
_UpperCAmelCase : Optional[int] = list(filter(lambda lowerCamelCase__ : e in out_line , lowerCamelCase__ ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(lowerCamelCase__ ) + "\n" )
out_lines.append(lowerCamelCase__ )
out_lines.append(lowerCamelCase__ )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase : Dict = re.sub(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase : Optional[int] = re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , lowerCamelCase__ )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split("," ) )
_UpperCAmelCase : List[str] = "from . import " + match.group(1 )
                    # Check that we have not forgotten anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""" )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase : int = True
out_lines.append(lowerCamelCase__ )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase : List[str] = f_name.replace(".py" , "" )
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = os.path.join(lowerCamelCase__ , lowerCamelCase__ )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
self._logger.info(F"""Adding directory {output_dir}""" )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(lowerCamelCase__ )
if needs_manual_update:
with_manual_update.append(lowerCamelCase__ )
with open(lowerCamelCase__ , "w" , encoding="utf-8" ) as f:
f.writelines(lowerCamelCase__ )
self._logger.info(F"""Converted in {output_file}""" )
for utils_file in utils_files:
try:
_UpperCAmelCase : List[Any] = os.path.basename(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = imports_to_builder_map[f_name.replace(".py" , "" )]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""" )
shutil.copy(lowerCamelCase__ , lowerCamelCase__ )
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""" )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""" )
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
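# Hedged usage sketch for the exported pipelines; the model ids below are the
# ones used in the diffusers documentation and are assumptions here.
def _controlnet_usage_sketch(prompt, canny_image):
    import torch
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

    controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
    )
    return pipe(prompt, image=canny_image).images[0]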
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase ():
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = 1
_UpperCAmelCase : Any = 2
while i * i <= n:
_UpperCAmelCase : Dict = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def __lowerCAmelCase ():
return next(i for i in triangle_number_generator() if count_divisors(__lowerCAmelCase ) > 500 )
if __name__ == "__main__":
print(solution())
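# Hedged worked example for the divisor count above: factor n, then multiply
# (multiplicity + 1) over the prime multiplicities.
# 28 = 2**2 * 7 -> (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28;
# 28 = 7 * 8 // 2 is also the first triangle number with more than 5 divisors.
assert len([d for d in range(1, 29) if 28 % d == 0]) == 6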
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
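# Hedged usage sketch (repo id and filename are illustrative): the helper
# builds a resolve URL for a file inside a dataset repo on the Hub, e.g.
# "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train.csv".
def _hf_hub_url_sketch():
    return hf_hub_url("user/my_dataset", "data/train.csv", revision="main")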
| 40
| 0
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = len(__lowerCAmelCase )
_UpperCAmelCase : str = len(matrix[0] )
_UpperCAmelCase : Any = min(__lowerCAmelCase , __lowerCAmelCase )
for row in range(__lowerCAmelCase ):
# Check if diagonal element is not zero
if matrix[row][row] != 0:
# Eliminate all the elements below the diagonal
for col in range(row + 1 , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = matrix[col][row] / matrix[row][row]
for i in range(__lowerCAmelCase , __lowerCAmelCase ):
matrix[col][i] -= multiplier * matrix[row][i]
else:
# Find a non-zero diagonal element to swap rows
_UpperCAmelCase : Union[str, Any] = True
for i in range(row + 1 , __lowerCAmelCase ):
if matrix[i][row] != 0:
                    _UpperCAmelCase , _UpperCAmelCase = matrix[i], matrix[row]
_UpperCAmelCase : Any = False
break
if reduce:
rank -= 1
for i in range(__lowerCAmelCase ):
_UpperCAmelCase : Dict = matrix[i][rank]
# Reduce the row pointer by one to stay on the same row
row -= 1
return rank
if __name__ == "__main__":
import doctest
doctest.testmod()
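# Hedged worked example for the Gaussian-elimination rank above (the helper
# below is an illustrative re-implementation, not the original function).
# [[1, 2], [2, 4]] has linearly dependent rows, so its rank is 1.
def _rank_sketch(matrix):
    rows, cols = len(matrix), len(matrix[0])
    rank, row = 0, 0
    for col in range(cols):
        pivot = next((r for r in range(row, rows) if matrix[r][col] != 0), None)
        if pivot is None:
            continue  # no pivot in this column, rank does not grow
        matrix[row], matrix[pivot] = matrix[pivot], matrix[row]
        for r in range(row + 1, rows):
            factor = matrix[r][col] / matrix[row][col]
            for c in range(col, cols):
                matrix[r][c] -= factor * matrix[row][c]
        rank, row = rank + 1, row + 1
    return rank

assert _rank_sketch([[1, 2], [2, 4]]) == 1
assert _rank_sketch([[1, 0], [0, 1]]) == 2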
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
| 40
| 0
|
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Dict = [0] * len(__lowerCAmelCase )
for i in range(1 , len(__lowerCAmelCase ) ):
# use last results for better performance - dynamic programming
_UpperCAmelCase : Optional[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
_UpperCAmelCase : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
_UpperCAmelCase : Dict = j
return prefix_result
def __lowerCAmelCase (__lowerCAmelCase ):
return max(prefix_function(__lowerCAmelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
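# Hedged worked example for the KMP prefix function above (the helper below is
# an illustrative re-implementation): for "abcab" the prefix array is
# [0, 0, 0, 1, 2], and the longest prefix that is also a suffix has length 2.
def _prefix_sketch(s):
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert _prefix_sketch("abcab") == [0, 0, 0, 1, 2]
assert max(_prefix_sketch("abcab")) == 2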
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
'''simple docstring'''
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowerCamelCase__ = get_logger()
lowerCamelCase__ = None
class lowerCAmelCase__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
def __init__( self : List[str] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : Dict=None , **lowerCamelCase__ : List[str] ) ->str:
'''simple docstring'''
super().__init__(features=lowerCamelCase__ )
import jax
from jaxlib.xla_client import Device
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError(
F"""Expected {device} to be a `str` not {type(lowerCamelCase__ )}, as `jaxlib.xla_extension.Device` """
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
_UpperCAmelCase : Any = device if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else str(jax.devices()[0] )
        # use a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase : Union[str, Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F"""Device with string identifier {self.device} not listed among the available """
F"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
F"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase : Optional[Any] = str(jax.devices()[0] )
_UpperCAmelCase : int = jnp_array_kwargs
@staticmethod
def lowerCAmelCase__ ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(lowerCamelCase__ ): device for device in jax.devices()}
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and column:
if all(
isinstance(lowerCamelCase__ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(lowerCamelCase__ , axis=0 )
return column
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[str] ) ->Optional[Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(lowerCamelCase__ , (str, bytes, type(lowerCamelCase__ )) ):
return value
elif isinstance(lowerCamelCase__ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase : Tuple = {}
if isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase : Tuple = {"dtype": jnp.intaa}
else:
_UpperCAmelCase : int = {"dtype": jnp.intaa}
elif isinstance(lowerCamelCase__ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase : Dict = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(lowerCamelCase__ , PIL.Image.Image ):
_UpperCAmelCase : Tuple = np.asarray(lowerCamelCase__ )
        # use a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase : Tuple = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(lowerCamelCase__ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(lowerCamelCase__ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(lowerCamelCase__ , "__array__" ) and not isinstance(lowerCamelCase__ , jax.Array ):
_UpperCAmelCase : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(lowerCamelCase__ , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
elif isinstance(lowerCamelCase__ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(lowerCamelCase__ ) for substruct in data_struct] )
return self._tensorize(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : dict ) ->Optional[Any]:
'''simple docstring'''
return map_nested(self._recursive_tensorize , lowerCamelCase__ , map_list=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase : str = self.numpy_arrow_extractor().extract_row(lowerCamelCase__ )
_UpperCAmelCase : List[str] = self.python_features_decoder.decode_row(lowerCamelCase__ )
return self.recursive_tensorize(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase : int = self.numpy_arrow_extractor().extract_column(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(lowerCamelCase__ , pa_table.column_names[0] )
_UpperCAmelCase : Tuple = self.recursive_tensorize(lowerCamelCase__ )
_UpperCAmelCase : str = self._consolidate(lowerCamelCase__ )
return column
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_batch(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = self.python_features_decoder.decode_batch(lowerCamelCase__ )
_UpperCAmelCase : Any = self.recursive_tensorize(lowerCamelCase__ )
for column_name in batch:
_UpperCAmelCase : Union[str, Any] = self._consolidate(batch[column_name] )
return batch
| 703
|
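For context, a minimal self-contained sketch (assuming only that jax and numpy are installed) of the dtype rule the formatter above applies: integer inputs get 64-bit precision only when JAX's x64 mode is enabled, and fall back to 32-bit otherwise. The helper name is illustrative, not part of the library.

import jax
import jax.numpy as jnp
import numpy as np

def default_int_dtype():
    # mirrors the branch above: the default int precision depends on the jax config
    return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32

value = np.arange(3, dtype=np.int64)
print(jnp.array(value, dtype=default_int_dtype()).dtype)  # int32 unless x64 mode is on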
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 40
| 0
|
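As context for the gradient-retention test above, a minimal plain-PyTorch sketch of the retain_grad pattern it relies on (the toy tensors are illustrative and unrelated to Mask2Former):

import torch

x = torch.randn(2, 4, requires_grad=True)
hidden = torch.relu(x @ torch.randn(4, 4))
hidden.retain_grad()  # ask autograd to keep the gradient of this non-leaf tensor
loss = hidden.sum()
loss.backward(retain_graph=True)
assert hidden.grad is not None  # without retain_grad() this would be None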
'''simple docstring'''
from __future__ import annotations
def __lowerCAmelCase (nums: list[int] ) -> int:
    # maximum sum of non-adjacent elements: track the best sum that includes
    # the current element and the best sum that excludes it
    if not nums:
        return 0
    max_including : int = nums[0]
    max_excluding : int = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including , max_excluding ),
        )
    return max(max_including , max_excluding )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 704
|
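A quick usage sketch of the non-adjacent-sum helper above (the function name is kept as it appears in the snippet; the input values are made up):

print(__lowerCAmelCase([3, 2, 7, 10]))  # 13: picks 3 and 10, skipping the adjacent 2 and 7
print(__lowerCAmelCase([]))  # 0: an empty input returns 0 by definition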
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def bamb(x ):
    # bytes -> mebibytes; the name matches the `bamb(...)` call sites below
    return int(x / 2**20 )
class TorchTracemalloc :
def __enter__( self : int ) ->Optional[Any]:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
return self
def __exit__( self : Tuple , *lowerCamelCase__ : str ) ->int:
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin )
        self.peaked = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 0
|
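A hedged usage sketch of the TorchTracemalloc context manager above (it assumes a CUDA device is available and that __exit__ stores the used/peaked deltas in MB, as the commented-out print inside the class suggests):

import torch

with TorchTracemalloc() as tracemalloc:
    x = torch.randn(1024, 1024, device="cuda")
    y = x @ x  # allocate some GPU memory inside the measured region
print(f"delta used/peak {tracemalloc.used}/{tracemalloc.peaked} MB")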