| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87 – 55.2k chars) | int64 (0 – 349) | string (135 – 49.1k chars) | int64 (0 – 349) | int64 (0 – 1) |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
    from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def get_config(self):
        return MaMaaaConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            encoder_layerdrop=self.encoder_layerdrop,
            decoder_layerdrop=self.decoder_layerdrop,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
        )

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]

    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
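For orientation, here is a minimal sketch of the same translation path outside the test harness. It assumes the public `facebook/m2m100_418M` checkpoint and uses the upstream `M2M100` class names rather than the `MaMaaa` aliases used in this file:

```python
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

batch = tokenizer("La vie est belle.", return_tensors="pt")
# M2M100 needs the target language id forced as the first generated token
generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```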
---
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
    from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
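A short usage sketch for the tool above (the image path and question are hypothetical; the Donut checkpoint is downloaded on first use):

```python
from PIL import Image

tool = DocumentQuestionAnsweringTool()
document = Image.open("invoice.png")  # hypothetical scanned document
print(tool(document, "What is the invoice number?"))
```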
---
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))

        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer's frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the "princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )

        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
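As a reference for the RoPE-scaling test above, this is roughly how a scaled model is configured (tiny hypothetical sizes; `rope_scaling` is a dict with `type` and `factor`):

```python
from transformers import LlamaConfig, LlamaModel

config = LlamaConfig(
    vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37
)
# "linear" rescales every position; "dynamic" only changes behaviour past max_position_embeddings
config.rope_scaling = {"type": "linear", "factor": 10.0}
model = LlamaModel(config)
```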
---
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """
    Sort a list of non-negative integers with LSD radix sort.

    >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    >>> radix_sort(list(range(15))) == sorted(range(15))
    True
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
---
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of softmax(x) for a batch of pre-softmax logits."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
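This is the Shannon entropy of `softmax(x)` in closed form: with A = Σᵢ exp(xᵢ) and B = Σᵢ xᵢ·exp(xᵢ), the entropy −Σᵢ pᵢ log pᵢ equals log A − B/A. A quick numerical sanity check (not part of the original module):

```python
logits = torch.randn(2, 5)  # a batch of pre-softmax logit rows
probs = torch.softmax(logits, dim=1)
reference = -(probs * probs.log()).sum(dim=1)
assert torch.allclose(entropy(logits), reference, atol=1e-5)
```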
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here

        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (off-ramp) head: pools one intermediate layer's output and classifies it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
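A minimal sketch of driving the early-exit path at inference time (hypothetical threshold and token ids; in the DeeBERT project the per-layer entropy thresholds are calibrated on a dev set):

```python
from transformers import BertConfig

config = BertConfig(num_labels=2)
model = DeeBertForSequenceClassification(config)
model.eval()

# a layer whose highway entropy falls below its threshold raises HighwayException
# inside the encoder; forward() catches it and returns that layer's prediction
model.bert.encoder.set_early_exit_entropy(0.5)

input_ids = torch.tensor([[101, 7592, 2088, 102]])  # hypothetical token ids
logits = model(input_ids)[0]
```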
---
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that, temporarily, `from diffusers.pipelines import DiffusionPipeline` keeps working.
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
---
from math import factorial

"""
Differentiation via dual numbers: the k-th dual coefficient of f(x + E)
carries the k-th derivative of f at x, divided by k!.
"""


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """
    >>> differentiate(lambda x: x**2, 2, 2)
    2
    >>> differentiate(lambda x: x**2 * x**4, 9, 2)
    196830
    """
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()


def f(y):
    return y**2 * y**4


print(differentiate(f, 9, 2))
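Two more illustrative calls (values follow from f(y) = y⁶, so they can be checked by hand):

```python
# first derivative of y**6 at y = 2 is 6 * 2**5 = 192
print(differentiate(lambda y: y**2 * y**4, 2, 1))  # 192
# order 0 simply evaluates the function: 2**6 = 64
print(differentiate(lambda y: y**2 * y**4, 2, 0))  # 64
```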
---
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        DebertaVaForMaskedLM,
        DebertaVaForMultipleChoice,
        DebertaVaForQuestionAnswering,
        DebertaVaForSequenceClassification,
        DebertaVaForTokenClassification,
        DebertaVaModel,
    )
    from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaVaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }
@require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )
@require_torch
    def test_accelerator(self):
        import torch

        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch.device("cpu"), )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])
@slow
@require_torch
    def test_large_model_pt(self):
        text_classifier = pipeline("text-classification")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
@slow
@require_tf
    def test_large_model_tf(self):
        text_classifier = pipeline("text-classification", framework="tf")
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])
    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)
        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """Performs a simulated BB84 key exchange and returns the generated key."""
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
from doctest import testmod
testmod()
'''Tokenize a raw text dump and binarize it (to token ids) as distillation preprocessing.'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description='Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).' )
    parser.add_argument('--file_path', type=str, default='data/dump.txt', help='The path to the data.' )
    parser.add_argument('--tokenizer_type', type=str, default='bert', choices=['bert', 'roberta', 'gpt2'] )
    parser.add_argument('--tokenizer_name', type=str, default='bert-base-uncased', help='The tokenizer to use.' )
    parser.add_argument('--dump_file', type=str, default='data/dump', help='The dump file prefix.' )
    args = parser.parse_args()

    logger.info(F'Loading Tokenizer ({args.tokenizer_name})' )
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name )
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(F'Loading text from {args.file_path}' )
    with open(args.file_path, 'r', encoding='utf8' ) as fp:
        data = fp.readlines()

    logger.info('Start encoding' )
    logger.info(F'{len(data )} examples to process.' )

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = F'{bos} {text.strip()} {sep}'
        token_ids = tokenizer.encode(text, add_special_tokens=False )
        rslt.append(token_ids )

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(F'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
            start = time.time()
    logger.info('Finished binarization' )
    logger.info(F'{len(data )} examples processed.' )

    dp_file = F'{args.dump_file}.{args.tokenizer_name}.pickle'
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d ) for d in rslt]
    else:
        rslt_ = [np.int32(d ) for d in rslt]
    random.shuffle(rslt_ )
    logger.info(F'Dump to {dp_file}' )
    with open(dp_file, 'wb' ) as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element) -> Tuple:
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float) -> List[List[Dict]]:
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
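# Worked example: get_tokens("a = b") == {"a", "b"} and get_tokens("a = c") == {"a", "c"},
# so jaccard_similarity of those two snippets is |{"a"}| / |{"a", "b", "c"}| = 1/3.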
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
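# Note: the dataset is handed to the worker processes via the module-level
# `_shared_dataset` global (set before the pool is created) so that forked workers
# inherit it instead of pickling the full dataset once per cluster.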
def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def replace_key_with_offset(key, offset, original_name, new_name) -> str:
    """Replaces the key by subtracting the offset from the original layer number."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
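# Illustrative example on a hypothetical key (not taken from an actual checkpoint):
# replace_key_with_offset("poolformer.encoder.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
# returns "poolformer.encoder.block.1.3.output.conv1.weight".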
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1_000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1_000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase : int = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
default="""poolformer_s12""",
type=str,
help="""Name of the model you\'d like to convert.""",
)
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowercase : Tuple = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
"""simple docstring"""
import math
import sys
import cv2
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2)
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow('''input image''', img)

    out = img / 255
    out = out.astype('''float32''')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow('''output image''', out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string, padding="max_length", max_length=256, add_special_tokens=False, return_attention_mask=True, return_token_type_ids=False, )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
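# This module is kept only as a backwards-compatibility shim: importing
# `DiffusionPipeline` or `ImagePipelineOutput` from here still works, but triggers
# the deprecation warning configured above.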
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of softmax(x) per row: log(sum exp x) - (sum x * exp x) / (sum exp x)."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
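# Sanity check: entropy(torch.tensor([[1.0, 1.0]])) equals log(2), the maximum for two
# classes, since softmax([1, 1]) is uniform; the early-exit rule below compares this
# per-example entropy against a per-layer threshold.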
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])
    def forward(
        self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask)
            hidden_states = layer_outputs[0]
            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output
            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)
        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = self.encoder(
            embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway exit head: a BertPooler plus classifier attached to an intermediate layer."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_layer=-1, train_highway=False, ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
"""simple docstring"""
def cocktail_shaker_sort(unsorted: list) -> list:
    '''
    Sorts a list in ascending order using the cocktail shaker (bidirectional bubble) sort.

    >>> cocktail_shaker_sort([4, 5, 2, 1, 2])
    [1, 2, 2, 4, 5]
    '''
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item) for item in user_input.split(''',''')]
    print(F'''{cocktail_shaker_sort(unsorted) = }''')
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    })
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url, mode=mode, headers=get_authentication_headers_for_url(url, use_auth_token=self.token), client_kwargs={"trust_env": True}, ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
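# enable_full_determinism() seeds the RNGs and forces deterministic torch kernels,
# which keeps the expected image slices asserted below reproducible across runs.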
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler(
            beta_start=0.00_085, beta_end=0.012, beta_schedule='scaled_linear', num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')

        components = {
            """unet""": unet,
            """scheduler""": scheduler,
            """vae""": vae,
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """safety_checker""": None,
            """feature_extractor""": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """An astronaut riding an elephant""",
            """source_prompt""": """An astronaut riding a horse""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """eta""": 0.1,
            """strength""": 0.8,
            """guidance_scale""": 3,
            """source_guidance_scale""": 1,
            """output_type""": """numpy""",
        }
        return inputs
    def test_stable_diffusion_cycle(self):
        device = """cpu"""  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images

        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4_459, 0.4_943, 0.4_544, 0.6_643, 0.5_474, 0.4_327, 0.5_701, 0.5_959, 0.5_179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_cycle_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]

        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
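    # The overrides above exist only to attach skip decorators to the shared mixin
    # tests; each one still delegates to the parent implementation.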
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1
    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy")
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
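# A minimal standalone usage sketch of the pipeline exercised above (illustrative
# only; the model id, scheduler and call arguments mirror the slow test, and
# `init_image` stands for any 512x512 PIL image):
#
#     scheduler = DDIMScheduler.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="scheduler")
#     pipe = CycleDiffusionPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", scheduler=scheduler, safety_checker=None
#     ).to("cuda")
#     image = pipe(
#         prompt="A blue colored car", source_prompt="A black colored car",
#         image=init_image, num_inference_steps=100, eta=0.1, strength=0.85,
#     ).images[0]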
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "num_images_per_prompt",
        "output_type",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }
        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
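    # Note: Kandinsky components use a VQModel under the "movq" key where Stable
    # Diffusion pipelines expect an AutoencoderKL under "vae".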
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
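    # Unlike plain img2img pipelines, Kandinsky consumes CLIP image embeddings from a
    # separate prior pipeline; the random `image_embeds`/`negative_image_embeds`
    # above stand in for those prior outputs in the fast tests.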
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy")
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png")
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16)
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple()
        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", )
        image = output.images[0]

        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}")
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch.")

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
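# Illustrative invocation (flag values are examples, not requirements):
#
#     python run_xnli.py \
#       --model_name_or_path bert-base-multilingual-cased \
#       --language de --train_language en \
#       --do_train --do_eval \
#       --per_device_train_batch_size 32 \
#       --output_dir /tmp/debug_xnli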
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
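# Worked example: distributing 8 attention blocks over 2 devices gives
# get_device_map(8, [0, 1]) == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
# (ceil(8 / 2) = 4 layers per block, sliced in order).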
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class ViTMSNConfig(PretrainedConfig):
    r"""
    Configuration class for a ViT MSN model; the defaults mirror the ViT-MSN base architecture.
    """

    model_type = "vit_msn"

    def __init__(self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, image_size=224, patch_size=16, num_channels=3, qkv_bias=True, **kwargs, ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
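# Illustrative usage (a randomly initialized model from these defaults):
#
#     from transformers import ViTMSNConfig, ViTMSNModel
#
#     configuration = ViTMSNConfig()       # vit-msn-base style defaults
#     model = ViTMSNModel(configuration)   # random weights, no download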
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)
        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None
        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids, skip_special_tokens=True, )
            }
            records.append(record)
        return records
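# Illustrative usage through the `pipeline` factory (the model id is just an
# example checkpoint):
#
#     from transformers import pipeline
#
#     captioner = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en")
#     captioner("path/or/url/to/image.png")
#     # -> [{"generated_text": "..."}]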
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)


def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
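# Worked example of the 0-1000 normalization LayoutLM-style models expect:
# normalize_box([10, 20, 30, 40], width=200, height=100) -> [50, 200, 150, 400]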
class LayoutLMv2ImageProcessor(BaseImageProcessor):
    r"""
    Constructs a LayoutLMv2 image processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: "PILImageResampling" = PILImageResampling.BILINEAR, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: "PILImageResampling" = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        """Resize an image to `(size["height"], size["width"])`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature:
        """Preprocess an image or batch of images, optionally running Tesseract OCR."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        # flip color channels from RGB to BGR (as Detectron2 requires this)
        images = [flip_channel_order(image) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)


@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
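# This builder backs `load_dataset("parquet", ...)`. Illustrative usage:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("parquet", data_files={"train": "train.parquet"}, split="train")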
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MRA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}


class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-5, position_embedding_type="absolute", block_per_row=4, approx_mode="full", initial_prior_first_n_blocks=0, initial_prior_diagonal_n_blocks=0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
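# Illustrative usage (a randomly initialized model from these defaults):
#
#     from transformers import MraConfig, MraModel
#
#     configuration = MraConfig()       # mra-base-512-4 style defaults
#     model = MraModel(configuration)   # random weights, no download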
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}


def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data.")
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.", )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ), )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run", )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )

        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids, )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            }, )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
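# Illustrative invocation, assuming this script is saved as run_onnx_exporter.py
# (flags mirror parse_args() above):
#
#     python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path BART.onnx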
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, groups: int = 1, activation: Optional[str] = "relu", ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, groups=groups, bias=False, )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act)
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class lowerCamelCase_ ( nn.Module ):
"""simple docstring"""
def __init__( self : Union[str, Any] , _a : Optional[Any] , _a : List[str] ) -> int:
super().__init__()
__lowerCamelCase : Dict = nn.AdaptiveAvgPoolad((1, 1) )
__lowerCamelCase : Tuple = nn.Sequential(
nn.Convad(a__ , a__ , kernel_size=1 ) , nn.ReLU() , nn.Convad(a__ , a__ , kernel_size=1 ) , nn.Sigmoid() , )
def _lowercase ( self : str , _a : Optional[int] ) -> List[Any]:
# b c h w -> b c 1 1
__lowerCamelCase : Tuple = self.pooler(a__ )
__lowerCamelCase : int = self.attention(a__ )
__lowerCamelCase : Optional[Any] = hidden_state * attention
return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with grouped 3x3 convolution."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with Squeeze and Excitation."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """A RegNet stage composed of stacked layers."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.

        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
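
# A hypothetical usage sketch for the classes above (standard transformers API;
# the checkpoint matches _IMAGE_CLASS_CHECKPOINT, the image variable is assumed):
#
#     from transformers import AutoImageProcessor
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=image, return_tensors="pt")
#     predicted = model.config.id2label[int(model(**inputs).logits.argmax(-1))]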
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Any ) -> List[Any]: # noqa: E741
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = [0] * n
_lowerCAmelCase : str = [False] * n
_lowerCAmelCase : str = [False] * n
def dfs(_lowerCamelCase : Tuple ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : Union[str, Any] ,_lowerCamelCase : str ):
if parent == root:
out_edge_count += 1
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_lowerCAmelCase : Union[str, Any] = dfs(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : Optional[int] = min(low[at] ,low[to] )
# AP found via bridge
if at < low[to]:
_lowerCAmelCase : int = True
# AP found via cycle
if at == low[to]:
_lowerCAmelCase : Tuple = True
else:
_lowerCAmelCase : Union[str, Any] = min(low[at] ,_lowerCamelCase )
return out_edge_count
for i in range(_lowerCamelCase ):
if not visited[i]:
_lowerCAmelCase : int = 0
_lowerCAmelCase : Dict = dfs(_lowerCamelCase ,_lowerCamelCase ,-1 ,_lowerCamelCase )
_lowerCAmelCase : List[str] = out_edge_count > 1
for x in range(len(_lowerCamelCase ) ):
if is_art[x] is True:
print(_lowerCamelCase )
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
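
# For the sample graph above the articulation points printed should be 2, 3
# and 5: removing any of them disconnects the graph (e.g. node 4 hangs off
# node 3, and nodes 6-8 are reachable only through node 5).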
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
lowercase : Dict = 0
lowercase : Optional[Any] = str(_lowerCamelCase )
while len(_lowerCamelCase ) != 1:
lowercase : int = [int(_lowerCamelCase ) for i in num_string]
lowercase : Dict = 1
for i in range(0 , len(_lowerCamelCase ) ):
total *= numbers[i]
lowercase : Tuple = str(_lowerCamelCase )
steps += 1
return steps
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> int:
if not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
lowercase : List[str] = 0
lowercase : Any = str(_lowerCamelCase )
while len(_lowerCamelCase ) != 1:
lowercase : Optional[int] = [int(_lowerCamelCase ) for i in num_string]
lowercase : str = 0
for i in range(0 , len(_lowerCamelCase ) ):
total += numbers[i]
lowercase : Tuple = str(_lowerCamelCase )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
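
# Worked examples (illustrative): multiplicative_persistence(39) == 3
# (39 -> 27 -> 14 -> 4) and additive_persistence(199) == 3 (199 -> 19 -> 10 -> 1).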
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
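
# Hypothetical usage sketch for the tokenizer exercised above (the expected ids
# come from the easy-symbols test):
#
#     tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
#     ids = tokenizer.encode("Hello World!")  # expected [2, 31227, 4447, 35]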
"""simple docstring"""
from collections import defaultdict
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[Any] = total # total no of tasks (N)
# DP table will have a dimension of (2^M)*N
# initially all values are set to -1
__a : Any = [
[-1 for i in range(total + 1 )] for j in range(2 ** len(a__ ) )
]
__a : Dict = defaultdict(a__ ) # stores the list of persons for each task
# final_mask is used to check if all persons are included by setting all bits
# to 1
__a : Union[str, Any] = (1 << len(a__ )) - 1
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
# if mask == self.finalmask all persons are distributed tasks, return 1
if mask == self.final_mask:
return 1
# if not everyone gets the task and no more tasks are available, return 0
if task_no > self.total_tasks:
return 0
# if case already considered
if self.dp[mask][task_no] != -1:
return self.dp[mask][task_no]
# Number of ways when we don't this task in the arrangement
__a : Any = self.count_ways_until(a__ , task_no + 1 )
# now assign the tasks one by one to all possible persons and recursively
# assign for the remaining tasks.
if task_no in self.task:
for p in self.task[task_no]:
# if p is already given a task
if mask & (1 << p):
continue
# assign this task to p and change the mask value. And recursively
# assign tasks with the new mask value.
total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 )
# save the value.
__a : List[str] = total_ways_util
return self.dp[mask][task_no]
def _lowerCamelCase ( self , _UpperCAmelCase ):
# Store the list of persons for each task
for i in range(len(a__ ) ):
for j in task_performed[i]:
self.task[j].append(a__ )
# call the function to fill the DP table, final answer is stored in dp[0][1]
return self.count_ways_until(0 , 1 )
if __name__ == "__main__":
A = 5 # total no of tasks (the value of N)
# the list of tasks that can be done by M persons.
A = [[1, 3, 4], [1, 2, 5], [3, 4]]
print(
AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
task_performed
)
)
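
# For the sample input above (3 persons, 5 allowed-task lists) the printed
# count should be 10: e.g. with person 2 on task 3, person 0 may take task 1
# or 4 and person 1 any still-free task from {1, 2, 5}.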
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ,_lowerCamelCase : int ) -> List[str]:
_lowerCAmelCase : Tuple = k_size // 2
_lowerCAmelCase , _lowerCAmelCase : List[str] = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
_lowerCAmelCase : Union[str, Any] = 1 / (2 * pi * sigma) * exp(-(square(_lowerCamelCase ) + square(_lowerCamelCase )) / (2 * square(_lowerCamelCase )) )
return g
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict ,_lowerCamelCase : int ,_lowerCamelCase : int ) -> Dict:
_lowerCAmelCase , _lowerCAmelCase : str = image.shape[0], image.shape[1]
# dst image height and width
_lowerCAmelCase : Optional[int] = height - k_size + 1
_lowerCAmelCase : Dict = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
_lowerCAmelCase : Tuple = zeros((dst_height * dst_width, k_size * k_size) )
_lowerCAmelCase : int = 0
for i, j in product(range(_lowerCamelCase ) ,range(_lowerCamelCase ) ):
_lowerCAmelCase : Any = ravel(image[i : i + k_size, j : j + k_size] )
_lowerCAmelCase : Union[str, Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
_lowerCAmelCase : List[Any] = gen_gaussian_kernel(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = ravel(_lowerCamelCase )
# reshape and get the dst image
_lowerCAmelCase : int = dot(_lowerCamelCase ,_lowerCamelCase ).reshape(_lowerCamelCase ,_lowerCamelCase ).astype(_lowerCamelCase )
return dst
if __name__ == "__main__":
# read original image
_a : Optional[Any] = imread(r'../image_data/lena.jpg')
# turn image in gray scale value
_a : Dict = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
_a : Union[str, Any] = gaussian_filter(gray, 3, sigma=1)
_a : List[Any] = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow('gaussian filter with 3x3 mask', gaussianaxa)
imshow('gaussian filter with 5x5 mask', gaussianaxa)
waitKey()
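
# A numpy-only sanity check for the kernel helper (illustrative, no image file
# needed): the generated kernel is symmetric and peaks at the centre.
#
#     k = gen_gaussian_kernel(3, sigma=1)
#     assert k.shape == (3, 3) and k[1, 1] == k.max()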
"""simple docstring"""
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import M2M100Tokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
    from transformers.models.m2m_100.tokenization_m2m_100 import VOCAB_FILES_NAMES, save_json

from ...test_tokenization_common import TokenizerTesterMixin


if is_sentencepiece_available():
    SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = M2M100Tokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = M2M100Tokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return M2M100Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "</s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], "</s>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "<s>")
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))

    @unittest.skip("Skip this test while all models are still to be uploaded.")
    def test_pretrained_model_lists(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, "This is a test")
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
SCREAMING_SNAKE_CASE_ : Optional[int] = {"""input_ids""": [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE_,
            model_name="facebook/m2m100_418M",
            revision="c168bae485c864188cf9aa0e4108b0b6934dc91e",
        )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/m2m100_418M"
    src_text = [
        "In my opinion, there are two levels of response from the French government.",
        "NSA Affair Emphasizes Complete Lack of Debate on Intelligence",
    ]
    tgt_text = [
        "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
        "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
    ]

    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
    # fmt: on

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: M2M100Tokenizer = M2M100Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en", tgt_lang="fr"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.get_lang_id("ar"), 128006)
        self.assertEqual(self.tokenizer.get_lang_id("en"), 128022)
        self.assertEqual(self.tokenizer.get_lang_id("ro"), 128076)
        self.assertEqual(self.tokenizer.get_lang_id("mr"), 128063)

    def test_get_vocab(self):
        vocab = self.tokenizer.get_vocab()
        self.assertEqual(len(vocab), self.tokenizer.vocab_size)
        self.assertEqual(vocab["<unk>"], 3)
        self.assertIn(self.tokenizer.get_lang_token("en"), vocab)

    def test_tokenizer_batch_encode_plus(self):
        self.tokenizer.src_lang = "en"
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(FR_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_french = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_french)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_special_tokens_unaffacted_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.lang_token_to_id
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = M2M100Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.lang_token_to_id, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        self.tokenizer.src_lang = "en"
        self.tokenizer.tgt_lang = "fr"

        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")

        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.eos_token_id
        )

        for k in batch:
            batch[k] = batch[k].tolist()
        # batch = {k: v.tolist() for k,v in batch.items()}
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        # batch.decoder_inputs_ids[0][0] ==
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == FR_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
    @require_torch
    def test_src_lang_setter(self):
        self.tokenizer.src_lang = "mr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

        self.tokenizer.src_lang = "zh"
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    @require_torch
    def test_tokenizer_target_mode(self):
        self.tokenizer.tgt_lang = "mr"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("mr")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])

        self.tokenizer.tgt_lang = "zh"
        self.tokenizer._switch_to_target_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id("zh")])
        self.assertListEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
        self.tokenizer._switch_to_input_mode()
        self.assertListEqual(self.tokenizer.prefix_tokens, [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs("A test", return_tensors="pt", src_lang="en", tgt_lang="ar")

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[128022, 58, 4183, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 128006,
            },
        )
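
# Hypothetical end-to-end sketch of the checkpoint exercised above (standard
# transformers API; the translation output itself is illustrative):
#
#     from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
#     tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#     model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
#     batch = tokenizer("Hello world", return_tensors="pt")
#     generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("fr"))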
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
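
# Hypothetical usage sketch (checkpoint name taken from the pretrained map above):
#
#     tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#     ids = tokenizer("ELECTRA replaces tokens rather than masking them").input_ids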
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()

        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n           Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n           Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n           Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n           Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n           Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n           Kern, Robert and Larson, Eric and Carey, C J and\n           Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n           {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n           Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n           Harris, Charles R. and Archibald, Anne M. and\n           Ribeiro, Antonio H. and Pedregosa, Fabian and\n           {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n           Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """
    Prepare the plaintext by up-casing it
    and separating repeated letters with X's.
    """
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]

        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean


def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table


def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext


def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
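
# Round-trip sanity check (illustrative, not part of the original file): for a
# message without the letter J, decoding an encoding recovers the prepared input.
#
#     assert decode(encode("free info", "monarchy"), "monarchy") == prepare_input("free info")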
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 50 ) -> int:
_lowerCAmelCase : int = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class a_ :
"""simple docstring"""
def __init__( self : Any ,snake_case : Tuple ,snake_case : List[Any]=13 ,snake_case : Any=64 ,snake_case : List[Any]=3 ,snake_case : int=3 ,snake_case : Union[str, Any]=2 ,snake_case : Dict=1 ,snake_case : Union[str, Any]=16 ,snake_case : Dict=[128, 256, 384] ,snake_case : int=[4, 6, 8] ,snake_case : Dict=[2, 3, 4] ,snake_case : int=[16, 16, 16] ,snake_case : Union[str, Any]=0 ,snake_case : Tuple=[2, 2, 2] ,snake_case : Any=[2, 2, 2] ,snake_case : Union[str, Any]=0.02 ,snake_case : Optional[Any]=True ,snake_case : List[Any]=True ,snake_case : Dict=2 ,):
SCREAMING_SNAKE_CASE =parent
SCREAMING_SNAKE_CASE =batch_size
SCREAMING_SNAKE_CASE =image_size
SCREAMING_SNAKE_CASE =num_channels
SCREAMING_SNAKE_CASE =kernel_size
SCREAMING_SNAKE_CASE =stride
SCREAMING_SNAKE_CASE =padding
SCREAMING_SNAKE_CASE =hidden_sizes
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =depths
SCREAMING_SNAKE_CASE =key_dim
SCREAMING_SNAKE_CASE =drop_path_rate
SCREAMING_SNAKE_CASE =patch_size
SCREAMING_SNAKE_CASE =attention_ratio
SCREAMING_SNAKE_CASE =mlp_ratio
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =[
["""Subsample""", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["""Subsample""", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
SCREAMING_SNAKE_CASE =is_training
SCREAMING_SNAKE_CASE =use_labels
SCREAMING_SNAKE_CASE =num_labels
SCREAMING_SNAKE_CASE =initializer_range
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE =None
if self.use_labels:
SCREAMING_SNAKE_CASE =ids_tensor([self.batch_size] ,self.num_labels )
SCREAMING_SNAKE_CASE =self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Dict ):
return LevitConfig(
image_size=self.image_size ,num_channels=self.num_channels ,kernel_size=self.kernel_size ,stride=self.stride ,padding=self.padding ,patch_size=self.patch_size ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,depths=self.depths ,key_dim=self.key_dim ,drop_path_rate=self.drop_path_rate ,mlp_ratio=self.mlp_ratio ,attention_ratio=self.attention_ratio ,initializer_range=self.initializer_range ,down_ops=self.down_ops ,)
def _lowerCAmelCase ( self : int ,snake_case : Union[str, Any] ,snake_case : Optional[int] ,snake_case : Optional[int] ):
SCREAMING_SNAKE_CASE =LevitModel(config=a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE =model(a__ )
SCREAMING_SNAKE_CASE =(self.image_size, self.image_size)
SCREAMING_SNAKE_CASE =image_size[0], image_size[1]
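        # The LeViT patch embedding stacks four stride-2 convolutions, so each pass
        # of this loop accounts for one halving of the spatial resolution.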
for _ in range(4 ):
SCREAMING_SNAKE_CASE =floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
SCREAMING_SNAKE_CASE =floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) ,)
def _lowerCAmelCase ( self : Any ,snake_case : List[str] ,snake_case : Optional[Any] ,snake_case : str ):
SCREAMING_SNAKE_CASE =self.num_labels
SCREAMING_SNAKE_CASE =LevitForImageClassification(a__ )
model.to(a__ )
model.eval()
SCREAMING_SNAKE_CASE =model(a__ ,labels=a__ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE =config_and_inputs
SCREAMING_SNAKE_CASE ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =LevitModelTester(self )
SCREAMING_SNAKE_CASE =ConfigTester(self ,config_class=a__ ,has_text_modality=a__ ,hidden_size=37 )
def _lowerCAmelCase ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Optional[int] ):
return
@unittest.skip(reason='Levit does not use inputs_embeds' )
def _lowerCAmelCase ( self : Dict ):
pass
@unittest.skip(reason='Levit does not support input and output embeddings' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
@unittest.skip(reason='Levit does not output attentions' )
def _lowerCAmelCase ( self : Dict ):
pass
def _lowerCAmelCase ( self : List[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =model_class(a__ )
SCREAMING_SNAKE_CASE =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE =["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,a__ )
def _lowerCAmelCase ( self : Union[str, Any] ):
def check_hidden_states_output(snake_case : Any ,snake_case : Optional[Any] ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**self._prepare_for_class(a__ ,a__ ) )
SCREAMING_SNAKE_CASE =outputs.hidden_states
SCREAMING_SNAKE_CASE =len(self.model_tester.depths ) + 1
self.assertEqual(len(a__ ) ,a__ )
SCREAMING_SNAKE_CASE =(self.model_tester.image_size, self.model_tester.image_size)
SCREAMING_SNAKE_CASE =image_size[0], image_size[1]
for _ in range(4 ):
SCREAMING_SNAKE_CASE =floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
SCREAMING_SNAKE_CASE =floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[
height * width,
self.model_tester.hidden_sizes[0],
] ,)
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE =True
check_hidden_states_output(a__ ,a__ ,a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE =True
check_hidden_states_output(a__ ,a__ ,a__ )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _lowerCAmelCase ( self : Optional[Any] ):
pass
def _lowerCAmelCase ( self : Any ,snake_case : Tuple ,snake_case : Union[str, Any] ,snake_case : Any=False ):
SCREAMING_SNAKE_CASE =super()._prepare_for_class(a__ ,a__ ,return_labels=a__ )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
def _lowerCAmelCase ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(a__ )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
SCREAMING_SNAKE_CASE =model_class(a__ )
model.to(a__ )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(a__ ,a__ ,return_labels=a__ )
SCREAMING_SNAKE_CASE =model(**a__ ).loss
loss.backward()
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE =False
SCREAMING_SNAKE_CASE =True
for model_class in self.all_model_classes:
if model_class in get_values(a__ ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
SCREAMING_SNAKE_CASE =model_class(a__ )
model.gradient_checkpointing_enable()
model.to(a__ )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(a__ ,a__ ,return_labels=a__ )
SCREAMING_SNAKE_CASE =model(**a__ ).loss
loss.backward()
def _lowerCAmelCase ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE =self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE =[
{"""title""": """multi_label_classification""", """num_labels""": 2, """dtype""": torch.float},
{"""title""": """single_label_classification""", """num_labels""": 1, """dtype""": torch.long},
{"""title""": """regression""", """num_labels""": 1, """dtype""": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(a__ ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ):
SCREAMING_SNAKE_CASE =problem_type["""title"""]
SCREAMING_SNAKE_CASE =problem_type["""num_labels"""]
SCREAMING_SNAKE_CASE =model_class(a__ )
model.to(a__ )
model.train()
SCREAMING_SNAKE_CASE =self._prepare_for_class(a__ ,a__ ,return_labels=a__ )
if problem_type["num_labels"] > 1:
SCREAMING_SNAKE_CASE =inputs["""labels"""].unsqueeze(1 ).repeat(1 ,problem_type['num_labels'] )
SCREAMING_SNAKE_CASE =inputs["""labels"""].to(problem_type['dtype'] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong with the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=a__ ) as warning_list:
SCREAMING_SNAKE_CASE =model(**a__ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'Something is going wrong in the regression problem: intercepted {w.message}' )
loss.backward()
@slow
def _lowerCAmelCase ( self : int ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE =LevitModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def snake_case__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : int ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowerCAmelCase ( self : Dict ):
SCREAMING_SNAKE_CASE =LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
a__ )
SCREAMING_SNAKE_CASE =self.default_image_processor
SCREAMING_SNAKE_CASE =prepare_img()
SCREAMING_SNAKE_CASE =image_processor(images=a__ ,return_tensors='pt' ).to(a__ )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE =model(**a__ )
# verify the logits
SCREAMING_SNAKE_CASE =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,a__ )
SCREAMING_SNAKE_CASE =torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a__ ,atol=1e-4 ) )
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        # Donut expects the question embedded in its DocVQA task prompt.
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
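# Usage sketch (illustrative; `page_image` would be a PIL image of a document page):
#     tool = DocumentQuestionAnsweringTool()
#     answer = tool(document=page_image, question="What is the invoice total?")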
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ (SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = KandinskyImgaImgPipeline
SCREAMING_SNAKE_CASE__ : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
SCREAMING_SNAKE_CASE__ : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
SCREAMING_SNAKE_CASE__ : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 32
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return 100
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
UpperCAmelCase_ : int = MultilingualCLIP(a__ )
UpperCAmelCase_ : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
UpperCAmelCase_ : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase_ : str = VQModel(**self.dummy_movq_kwargs )
return model
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase_ : List[Any] = self.dummy_tokenizer
UpperCAmelCase_ : int = self.dummy_unet
UpperCAmelCase_ : Dict = self.dummy_movq
UpperCAmelCase_ : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_00_85,
"""beta_end""": 0.0_12,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(**a__ )
UpperCAmelCase_ : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def UpperCamelCase__ ( self , lowercase_ , lowercase_=0 ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
UpperCAmelCase_ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
UpperCAmelCase_ : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
UpperCAmelCase_ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("RGB" ).resize((256, 256) )
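        # MPS does not support device-scoped generators, so seed the global CPU
        # generator there; other devices get a dedicated `torch.Generator`.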
if str(a__ ).startswith("mps" ):
UpperCAmelCase_ : List[Any] = torch.manual_seed(a__ )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
UpperCAmelCase_ : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = """cpu"""
UpperCAmelCase_ : int = self.get_dummy_components()
UpperCAmelCase_ : int = self.pipeline_class(**a__ )
UpperCAmelCase_ : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
UpperCAmelCase_ : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
UpperCAmelCase_ : List[Any] = output.images
UpperCAmelCase_ : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : str = np.array(
[0.61_47_49_43, 0.6_07_35_39, 0.43_30_85_44, 0.5_92_82_69, 0.47_49_35_95, 0.46_75_59_73, 0.4_61_38_38, 0.45_36_87_97, 0.50_11_92_33] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_img2img_frog.npy" )
UpperCAmelCase_ : List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCAmelCase_ : Union[str, Any] = """A red cartoon frog, 4k"""
UpperCAmelCase_ : int = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
UpperCAmelCase_ : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1" , torch_dtype=torch.floataa )
UpperCAmelCase_ : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
UpperCAmelCase_ : Any = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCAmelCase_ : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="np" , )
UpperCAmelCase_ : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
"""simple docstring"""
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets, one per possible digit
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets by the current digit
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints, in bucket order
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
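# Least-significant-digit radix sort: every pass is a stable distribution over
# base-10 buckets, so the whole sort costs O(digits * (n + RADIX)).
# Example:
#     >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     [2, 24, 45, 66, 75, 90, 170, 802]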
if __name__ == "__main__":
import doctest
doctest.testmod()
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    # Rounding absorbs floating-point noise before comparing to the zero vector.
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    # Three points are collinear iff the cross product AB x AC is the zero vector.
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
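# Examples:
#     >>> are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2))
#     True
#     >>> are_collinear((0, 0, 0), (1, 0, 0), (0, 1, 0))
#     False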
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
UpperCAmelCase_ = logging.get_logger(__name__)
enable_full_determinism()
class lowercase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
a : Optional[int] = UNetaDModel
a : int = "sample"
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = 4
UpperCamelCase__ : Optional[int] = 3
UpperCamelCase__ : List[str] = (32, 32)
UpperCamelCase__ : str = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
UpperCamelCase__ : Dict = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : str = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
UpperCamelCase__ : int = self.dummy_input
return init_dict, inputs_dict
class lowercase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
a : Tuple = UNetaDModel
a : Dict = "sample"
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = 4
UpperCamelCase__ : int = 4
UpperCamelCase__ : Optional[int] = (32, 32)
UpperCamelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
UpperCamelCase__ : int = torch.tensor([10] ).to(a__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return (4, 32, 32)
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return (4, 32, 32)
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Dict = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
UpperCamelCase__ : int = self.dummy_input
return init_dict, inputs_dict
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(a__ )
UpperCamelCase__ : Any = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''' )
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : Any = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=a__ )
model.to(a__ )
UpperCamelCase__ : Union[str, Any] = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != '''cuda''', '''This test is supposed to run on GPU''' )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
UpperCamelCase__ : List[Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''', output_loading_info=a__ )
model_accelerate.to(a__ )
model_accelerate.eval()
UpperCamelCase__ : int = torch.randn(
1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0 ), )
UpperCamelCase__ : Optional[int] = noise.to(a__ )
UpperCamelCase__ : Dict = torch.tensor([10] * noise.shape[0] ).to(a__ )
UpperCamelCase__ : Dict = model_accelerate(a__, a__ )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
UpperCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(
'''fusing/unet-ldm-dummy-update''', output_loading_info=a__, low_cpu_mem_usage=a__ )
model_normal_load.to(a__ )
model_normal_load.eval()
UpperCamelCase__ : Dict = model_normal_load(a__, a__ )["""sample"""]
assert torch_all_close(a__, a__, rtol=1E-3 )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = UNetaDModel.from_pretrained('''fusing/unet-ldm-dummy-update''' )
model.eval()
model.to(a__ )
UpperCamelCase__ : List[Any] = torch.randn(
1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0 ), )
UpperCamelCase__ : List[str] = noise.to(a__ )
UpperCamelCase__ : Any = torch.tensor([10] * noise.shape[0] ).to(a__ )
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] = model(a__, a__ ).sample
UpperCamelCase__ : int = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
UpperCamelCase__ : Optional[int] = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800] )
# fmt: on
self.assertTrue(torch_all_close(a__, a__, rtol=1E-3 ) )
class lowercase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
a : List[str] = UNetaDModel
a : Any = "sample"
@property
def UpperCamelCase__ ( self, __magic_name__=(32, 32) ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = 4
UpperCamelCase__ : Dict = 3
UpperCamelCase__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes ).to(a__ )
UpperCamelCase__ : Any = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa, device=a__ )
return {"sample": noise, "timestep": time_step}
@property
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
return (3, 32, 32)
@property
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return (3, 32, 32)
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1E-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
UpperCamelCase__ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
@slow
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''', output_loading_info=a__ )
self.assertIsNotNone(a__ )
self.assertEqual(len(loading_info['''missing_keys'''] ), 0 )
model.to(a__ )
UpperCamelCase__ : str = self.dummy_input
UpperCamelCase__ : Optional[Any] = floats_tensor((4, 3) + (256, 256) ).to(a__ )
UpperCamelCase__ : Any = noise
UpperCamelCase__ : str = model(**a__ )
assert image is not None, "Make sure output is not None"
@slow
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = UNetaDModel.from_pretrained('''google/ncsnpp-celebahq-256''' )
model.to(a__ )
UpperCamelCase__ : str = 4
UpperCamelCase__ : List[str] = 3
UpperCamelCase__ : Any = (256, 256)
UpperCamelCase__ : List[Any] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
UpperCamelCase__ : str = torch.tensor(batch_size * [1E-4] ).to(a__ )
with torch.no_grad():
UpperCamelCase__ : Optional[Any] = model(a__, a__ ).sample
UpperCamelCase__ : Optional[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ : int = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608] )
# fmt: on
self.assertTrue(torch_all_close(a__, a__, rtol=1E-2 ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : int = UNetaDModel.from_pretrained('''fusing/ncsnpp-ffhq-ve-dummy-update''' )
model.to(a__ )
UpperCamelCase__ : Union[str, Any] = 4
UpperCamelCase__ : Tuple = 3
UpperCamelCase__ : int = (32, 32)
UpperCamelCase__ : List[str] = torch.ones((batch_size, num_channels) + sizes ).to(a__ )
UpperCamelCase__ : List[Any] = torch.tensor(batch_size * [1E-4] ).to(a__ )
with torch.no_grad():
UpperCamelCase__ : int = model(a__, a__ ).sample
UpperCamelCase__ : Tuple = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
UpperCamelCase__ : Optional[Any] = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256] )
# fmt: on
self.assertTrue(torch_all_close(a__, a__, rtol=1E-2 ) )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
# not required for this model
pass
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
(
(
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) , (
_lowerCAmelCase
) ,
) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCamelCase = 'pt'
elif is_tf_available():
lowerCamelCase = 'tf'
else:
lowerCamelCase = 'jax'
class _a ( SCREAMING_SNAKE_CASE_ , unittest.TestCase):
_a : List[Any] = PerceiverTokenizer
_a : Optional[int] = False
def UpperCAmelCase__( self : Union[str, Any] )-> Union[str, Any]:
super().setUp()
lowerCAmelCase__ : Union[str, Any] = PerceiverTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase__( self : Tuple )-> Dict:
return PerceiverTokenizer.from_pretrained('''deepmind/language-perceiver''' )
def UpperCAmelCase__( self : Union[str, Any] , **_SCREAMING_SNAKE_CASE : List[str] )-> Optional[int]:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a__ )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : List[str]=20 , _SCREAMING_SNAKE_CASE : Optional[int]=5 )-> List[str]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
lowerCAmelCase__ : List[str] = []
for i in range(len(a__ ) ):
try:
lowerCAmelCase__ : int = tokenizer.decode([i] , clean_up_tokenization_spaces=a__ )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
lowerCAmelCase__ : int = list(filter(lambda _SCREAMING_SNAKE_CASE : re.match(r'''^[ a-zA-Z]+$''' , t[1] ) , a__ ) )
lowerCAmelCase__ : Union[str, Any] = list(filter(lambda _SCREAMING_SNAKE_CASE : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=a__ ) , a__ ) )
if max_length is not None and len(a__ ) > max_length:
lowerCAmelCase__ : Optional[int] = toks[:max_length]
if min_length is not None and len(a__ ) < min_length and len(a__ ) > 0:
while len(a__ ) < min_length:
lowerCAmelCase__ : Dict = toks + toks
# toks_str = [t[1] for t in toks]
lowerCAmelCase__ : Union[str, Any] = [t[0] for t in toks]
# Ensure consistency
lowerCAmelCase__ : Tuple = tokenizer.decode(a__ , clean_up_tokenization_spaces=a__ )
if " " not in output_txt and len(a__ ) > 1:
lowerCAmelCase__ : Dict = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=a__ )
+ """ """
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=a__ )
)
if with_prefix_space:
lowerCAmelCase__ : Tuple = """ """ + output_txt
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
return output_txt, output_ids
def UpperCAmelCase__( self : List[str] )-> List[str]:
lowerCAmelCase__ : int = self.perceiver_tokenizer
lowerCAmelCase__ : Optional[int] = """Unicode €."""
lowerCAmelCase__ : Union[str, Any] = tokenizer(a__ )
lowerCAmelCase__ : List[Any] = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded['''input_ids'''] , a__ )
# decoding
lowerCAmelCase__ : Union[str, Any] = tokenizer.decode(a__ )
self.assertEqual(a__ , '''[CLS]Unicode €.[SEP]''' )
lowerCAmelCase__ : Dict = tokenizer('''e è é ê ë''' )
lowerCAmelCase__ : int = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded['''input_ids'''] , a__ )
# decoding
lowerCAmelCase__ : List[str] = tokenizer.decode(a__ )
self.assertEqual(a__ , '''[CLS]e è é ê ë[SEP]''' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('''e è é ê ë''' ) ) , '''[CLS]e è é ê ë[SEP]''' )
def UpperCAmelCase__( self : Optional[Any] )-> str:
lowerCAmelCase__ : str = self.perceiver_tokenizer
lowerCAmelCase__ : Any = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
# fmt: off
lowerCAmelCase__ : Any = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
lowerCAmelCase__ : List[str] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
self.assertIsInstance(a__ , a__ )
if FRAMEWORK != "jax":
lowerCAmelCase__ : str = list(batch.input_ids.numpy()[0] )
else:
lowerCAmelCase__ : List[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
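        # 38 = [CLS] + the 36 characters of the longer sentence + [SEP]; the
        # shorter sentence is padded up to the same length.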
def UpperCAmelCase__( self : List[str] )-> Dict:
lowerCAmelCase__ : Optional[Any] = self.perceiver_tokenizer
lowerCAmelCase__ : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowerCAmelCase__ : Optional[Any] = tokenizer(a__ , padding=a__ , return_tensors=a__ )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('''input_ids''' , a__ )
self.assertIn('''attention_mask''' , a__ )
self.assertNotIn('''decoder_input_ids''' , a__ )
self.assertNotIn('''decoder_attention_mask''' , a__ )
def UpperCAmelCase__( self : Optional[Any] )-> Dict:
lowerCAmelCase__ : Dict = self.perceiver_tokenizer
lowerCAmelCase__ : int = [
"""Summary of the text.""",
"""Another summary.""",
]
lowerCAmelCase__ : Optional[int] = tokenizer(
text_target=a__ , max_length=32 , padding='''max_length''' , truncation=a__ , return_tensors=a__ )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
def UpperCAmelCase__( self : List[Any] )-> Tuple:
# safety check on max_len default value so we are sure the test works
lowerCAmelCase__ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : Optional[int] = tempfile.mkdtemp()
lowerCAmelCase__ : Dict = """ He is very happy, UNwant\u00E9d,running"""
lowerCAmelCase__ : Optional[int] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase__ : Dict = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase__ : Optional[int] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
shutil.rmtree(a__ )
lowerCAmelCase__ : Tuple = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
lowerCAmelCase__ : Optional[Any] = tempfile.mkdtemp()
lowerCAmelCase__ : List[str] = """ He is very happy, UNwant\u00E9d,running"""
tokenizer.add_tokens(['''bim''', '''bambam'''] )
lowerCAmelCase__ : Optional[Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('''new_additional_special_token''' )
tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens} )
lowerCAmelCase__ : Optional[Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
tokenizer.save_pretrained(a__ )
lowerCAmelCase__ : Tuple = tokenizer.__class__.from_pretrained(a__ )
lowerCAmelCase__ : Optional[int] = after_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
self.assertIn('''new_additional_special_token''' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
lowerCAmelCase__ : Dict = tokenizer.__class__.from_pretrained(a__ , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(a__ )
def UpperCAmelCase__( self : Optional[Any] )-> List[str]:
lowerCAmelCase__ : Dict = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(a__ )
with open(os.path.join(a__ , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : Any = json.load(a__ )
with open(os.path.join(a__ , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
lowerCAmelCase__ : Union[str, Any] = json.load(a__ )
lowerCAmelCase__ : int = [F'<extra_id_{i}>' for i in range(125 )]
lowerCAmelCase__ : int = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
lowerCAmelCase__ : Any = added_tokens_extra_ids + [
"""an_additional_special_token"""
]
with open(os.path.join(a__ , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(a__ , a__ )
with open(os.path.join(a__ , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(a__ , a__ )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
lowerCAmelCase__ : List[str] = tokenizer_class.from_pretrained(
a__ , )
self.assertIn(
'''an_additional_special_token''' , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
['''an_additional_special_token'''] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['''an_additional_special_token'''] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
lowerCAmelCase__ : List[Any] = added_tokens_extra_ids + [AddedToken('''a_new_additional_special_token''' , lstrip=a__ )]
lowerCAmelCase__ : List[Any] = tokenizer_class.from_pretrained(
a__ , additional_special_tokens=a__ , )
self.assertIn('''a_new_additional_special_token''' , tokenizer.additional_special_tokens )
self.assertEqual(
['''a_new_additional_special_token'''] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['''a_new_additional_special_token'''] ) ) , )
def UpperCAmelCase__( self : str )-> List[str]:
lowerCAmelCase__ : List[str] = self.perceiver_tokenizer
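        # Token 178 maps to a byte that is not valid UTF-8 on its own, so decoding
        # it in isolation yields the Unicode replacement character.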
self.assertEqual(tokenizer.decode([178] ) , '''�''' )
def UpperCAmelCase__( self : int )-> Union[str, Any]:
pass
def UpperCAmelCase__( self : List[Any] )-> List[str]:
pass
def UpperCAmelCase__( self : List[str] )-> str:
pass
def UpperCAmelCase__( self : Dict )-> List[str]:
pass
def UpperCAmelCase__( self : Any )-> str:
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
lowerCAmelCase__ : int = self.get_tokenizers(fast=a__ , do_lower_case=a__ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
lowerCAmelCase__ : List[str] = ["""[CLS]""", """t""", """h""", """i""", """s""", """ """, """i""", """s""", """ """, """a""", """ """, """t""", """e""", """s""", """t""", """[SEP]"""]
lowerCAmelCase__ : Union[str, Any] = tokenizer.convert_tokens_to_string(a__ )
self.assertIsInstance(a__ , a__ )
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    rng = np.random.default_rng(seed=seed)
    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)
    # Quantum circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")
    # Alice prepares her qubits according to the rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    # Bob measures the received qubits according to the rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()
    bb84_circ.measure_all()
    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()
    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )
    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
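As a quick plausibility check on the 6x oversampling above, here is a purely classical sketch (my addition, not part of the original script) of how many prepared qubits survive basis sifting:

import numpy as np

rng = np.random.default_rng(0)
key_len, num_qubits, trials = 8, 6 * 8, 10_000
alice = rng.integers(2, size=(trials, num_qubits))
bob = rng.integers(2, size=(trials, num_qubits))
survivors = (alice == bob).sum(axis=1)  # bases agree with probability 1/2 per qubit
print(survivors.mean())               # ~24 sifted bits on average, well above key_len
print((survivors < key_len).mean())   # fraction of runs that would need zero-padding (~0)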
| 44
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """simple docstring"""

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self):
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}


@dataclass
class TranslationVariableLanguages:
    """simple docstring"""

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f'Some languages in example ({", ".join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({", ".join(lang_set)}).'
            )
        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])
        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self):
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
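A short usage sketch (my example data; assumes the surrounding `datasets` package context these classes normally live in):

feature = TranslationVariableLanguages(languages=["de", "en", "fr"])
example = {"en": "the cat", "fr": ["le chat", "la chatte"], "de": "die katze"}
print(feature.encode_example(example))
# {'language': ('de', 'en', 'fr', 'fr'),
#  'translation': ('die katze', 'the cat', 'la chatte', 'le chat')}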
| 208
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256
def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code_a: str, code_b: str) -> float:
    tokens_a = get_tokens(code_a)
    tokens_b = get_tokens(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element_a in cluster:
        code_a = _shared_dataset[element_a["base_index"]]["content"]
        for element_b in extremes:
            code_b = _shared_dataset[element_b["base_index"]]["content"]
            if jaccard_similarity(code_a, code_b) >= jaccard_threshold:
                element_b["copies"] += 1
                break
        else:
            element_a["copies"] = 1
            extremes.append(element_a)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(
    dataset: Type[Dataset], jaccard_threshold: float = 0.85
) -> Tuple[Type[Dataset], List[List[Dict]]]:
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
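A toy end-to-end run, as a sketch under stated assumptions: the file contents must be long enough to clear MIN_NUM_TOKENS, and a `__main__` guard is needed because the pipeline spawns multiprocessing pools.

from datasets import Dataset

if __name__ == "__main__":
    ds = Dataset.from_dict(
        {
            "content": [
                "def add(first, second):\n    # add two numbers and return the total value\n    return first + second",
                "def add(first, second):\n    # add two numbers and return the total value\n    return first + second\n",
                "class Greeter:\n    def greet(self, name):\n        print('hello', name)",
            ],
            "repo_name": ["repo_a", "repo_b", "repo_c"],
            "path": ["add.py", "add_copy.py", "greet.py"],
        }
    )
    ds_filter, clusters = deduplicate_dataset(ds, jaccard_threshold=0.85)
    # The two near-identical files collapse into one cluster with a single extreme.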
| 44
| 0
|
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F'''{solution(99)}''')
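A few quick checks (reference values as quoted in the Project Euler 112 statement):

assert not check_bouncy(134468)  # digits never decrease
assert not check_bouncy(66420)   # digits never increase
assert check_bouncy(155349)      # increases then decreases, hence bouncy
assert solution(50) == 538       # least number reaching a 50% bouncy proportion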
| 20
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
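A small sanity check of the derived attributes (assuming the class as reconstructed above):

config = Swinv2Config()
assert config.num_layers == len(config.depths) == 4
# channel width doubles at each of the len(depths) - 1 stage transitions:
assert config.hidden_size == 96 * 2 ** (4 - 1)  # 768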
| 44
| 0
|
"""simple docstring"""
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''')
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''')
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func'''):
        parser.print_help()
        exit(1)
    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
| 160
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 44
| 0
|
"""simple docstring"""
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tfa_weights_in_bert(model, tf_checkpoint_path, config):
    """simple docstring"""
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f'Converting TensorFlow checkpoint from {tf_path}')
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split('/')
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f'Skipping non-model layer {full_name}')
            continue
        if "optimizer" in full_name:
            logger.info(f'Skipping optimization layer {full_name}')
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith('layer_with_weights'):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append('/'.join(name))
        arrays.append(array)
    logger.info(f'Read a total of {len(arrays):,} layers')
    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f'Found layer names with different depths (layer depth {list(set(layer_depth))})')
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            'The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP'
            ' heads.')
    # convert layers
    logger.info('Converting weights...')
    for full_name, array in zip(names, arrays):
        name = full_name.split('/')
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith('layer_with_weights'):
                layer_num = int(m_name.split('-')[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(['embeddings', 'LayerNorm'])
                    pointer = getattr(pointer, 'embeddings')
                    pointer = getattr(pointer, 'LayerNorm')
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(['encoder', 'layer', str(layer_num - 4)])
                    pointer = getattr(pointer, 'encoder')
                    pointer = getattr(pointer, 'layer')
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(['pooler', 'dense'])
                    pointer = getattr(pointer, 'pooler')
                    pointer = getattr(pointer, 'dense')
            elif m_name == "embeddings":
                trace.append('embeddings')
                pointer = getattr(pointer, 'embeddings')
                if layer_num == 0:
                    trace.append('word_embeddings')
                    pointer = getattr(pointer, 'word_embeddings')
                elif layer_num == 1:
                    trace.append('position_embeddings')
                    pointer = getattr(pointer, 'position_embeddings')
                elif layer_num == 2:
                    trace.append('token_type_embeddings')
                    pointer = getattr(pointer, 'token_type_embeddings')
                else:
                    raise ValueError(f'Unknown embedding layer with name {full_name}')
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(['attention', 'self'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'self')
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(['attention', 'output', 'LayerNorm'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(['attention', 'output', 'dense'])
                pointer = getattr(pointer, 'attention')
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_dense":
                # output dense
                trace.extend(['output', 'dense'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(['output', 'LayerNorm'])
                pointer = getattr(pointer, 'output')
                pointer = getattr(pointer, 'LayerNorm')
            elif m_name == "_key_dense":
                # attention key
                trace.append('key')
                pointer = getattr(pointer, 'key')
            elif m_name == "_query_dense":
                # attention query
                trace.append('query')
                pointer = getattr(pointer, 'query')
            elif m_name == "_value_dense":
                # attention value
                trace.append('value')
                pointer = getattr(pointer, 'value')
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(['intermediate', 'dense'])
                pointer = getattr(pointer, 'intermediate')
                pointer = getattr(pointer, 'dense')
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.append('output')
                pointer = getattr(pointer, 'output')
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append('bias')
                pointer = getattr(pointer, 'bias')
            elif m_name in ["kernel", "gamma"]:
                trace.append('weight')
                pointer = getattr(pointer, 'weight')
            else:
                logger.warning(f'Ignored {m_name}')
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(R'(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)', trace) or re.match(
            R'(\S+)\.attention\.output\.dense\.weight', trace):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
                f' {array.shape}')
        logger.info(f'Successfully set variable {full_name} to PyTorch layer {trace}')
    return model
def convert_tfa_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    """simple docstring"""
    logger.info(f'Loading model based on config from {config_path}...')
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)
    # Load weights from checkpoint
    logger.info(f'Loading weights from checkpoint {tf_checkpoint_path}...')
    load_tfa_weights_in_bert(model, tf_checkpoint_path, config)
    # Save pytorch-model
    logger.info(f'Saving PyTorch model to {pytorch_dump_path}...')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"""--tf_checkpoint_path""", type=str, required=True, help="""Path to the TensorFlow 2.x checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
type=str,
required=True,
help="""The config json file corresponding to the BERT model. This specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""",
type=str,
required=True,
help="""Path to the output PyTorch model (must include filename).""",
)
args = parser.parse_args()
convert_tfa_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
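The same conversion can also be driven from Python instead of the command line; all three paths below are hypothetical placeholders:

convert_tfa_checkpoint_to_pytorch(
    tf_checkpoint_path="./tf2_checkpoint/bert_model.ckpt",  # hypothetical
    config_path="./bert_config.json",                       # hypothetical
    pytorch_dump_path="./pytorch_model.bin",                # hypothetical
)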
| 91
|
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
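# Note (added for clarity): this closed form is the Shannon entropy of softmax(x).
# With A = sum_i exp(x_i), p_i = exp(x_i) / A, and B = sum_i x_i * exp(x_i):
#   H = -sum_i p_i * log(p_i) = -sum_i p_i * (x_i - log(A)) = log(A) - B / A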
class DeeBertEncoder(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class DeeBertModel(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D or 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
| 0
|
"""simple docstring"""
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    '''simple docstring'''
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    '''simple docstring'''
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    '''simple docstring'''
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    '''simple docstring'''
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    '''simple docstring'''
    results = run(n)
    return results[0] if len(results) else None
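Quick checks against the examples quoted in the Project Euler 47 statement:

assert unique_prime_factors(644) == {2, 7, 23}  # 644 = 2^2 * 7 * 23
assert run(2) == [14, 15]                       # first pair with 2 distinct factors each
assert run(3) == [644, 645, 646]                # first triple with 3 distinct factors each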
if __name__ == "__main__":
print(solution())
| 105
|
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info=None, token=None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path, mode="rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
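A minimal offline sketch (my construction, not from the library's test suite): any object exposing `siblings` entries with an `rfilename` attribute is enough to exercise the directory cache.

from types import SimpleNamespace

repo_info = SimpleNamespace(
    id="user/demo",
    sha="main",
    siblings=[
        SimpleNamespace(rfilename="data/train.csv"),
        SimpleNamespace(rfilename="README.md"),
    ],
)
fs = HfFileSystem(repo_info=repo_info)
print(fs.ls(""))                  # ['README.md', 'data']
print(fs.info("data/train.csv"))  # {'name': 'data/train.csv', 'size': None, 'type': 'file'}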
| 44
| 0
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                'you\'ll need to provide the paths instead, '
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
| 214
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
| 44
| 0
|
__version__ = '0.21.0'
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
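A representative use of one of these utilities: `find_executable_batch_size` retries a training function with a halved batch size whenever it hits a CUDA out-of-memory error (the loop body below is a placeholder):

from accelerate import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"attempting batch_size={batch_size}")
    ...  # build dataloaders and run the training loop at this batch size

train()  # called with no arguments; the decorator supplies batch_size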
| 334
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks) )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
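A worked example under the reconstructed names above: twelve layers spread over four devices gives ceil(12 / 4) = 3 layers per device.

device_map = get_device_map(12, [0, 1, 2, 3])
print(device_map)  # {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, 12)  # passes: no duplicates, nothing missing or extra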
| 44
| 0
|
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_ (SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = ProphetNetTokenizer
SCREAMING_SNAKE_CASE__ : Any = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
super().setUp()
vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase__ ( self , lowercase_ ):
"""simple docstring"""
input_text = """UNwant\u00E9d,running"""
output_text = """unwanted, running"""
return input_text, output_text
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(tokens , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [9, 6, 7, 12, 10, 11] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=False )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False , strip_accents=True )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
tokenizer = BasicTokenizer(do_lower_case=False , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
UpperCAmelCase_ : Optional[int] = {}
for i, token in enumerate(a__ ):
UpperCAmelCase_ : Optional[Any] = i
UpperCAmelCase_ : str = WordpieceTokenizer(vocab=a__ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
@require_torch
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
UpperCAmelCase_ : Dict = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
UpperCAmelCase_ : Any = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
UpperCAmelCase_ : Any = tokenizer(a__ , padding=a__ , return_tensors="pt" )
self.assertIsInstance(a__ , a__ )
UpperCAmelCase_ : Optional[Any] = list(batch.input_ids.numpy()[0] )
self.assertListEqual(a__ , a__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased" )
UpperCAmelCase_ : Dict = tokenizer.encode("sequence builders" , add_special_tokens=a__ )
UpperCAmelCase_ : Union[str, Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=a__ )
UpperCAmelCase_ : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
UpperCAmelCase_ : List[Any] = tokenizer.build_inputs_with_special_tokens(a__ , a__ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
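

# Quick usage sketch outside the test harness (same checkpoint as in the tests
# above; outputs depend on the pretrained vocabulary, so none are asserted here):
#
#   from transformers import ProphetNetTokenizer
#
#   tokenizer = ProphetNetTokenizer.from_pretrained("microsoft/prophetnet-large-uncased")
#   ids = tokenizer("A long paragraph for summarization.").input_ids
#   # per test_prepare_batch, the encoded sequence ends with the [SEP] id 102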
deps = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
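

# A minimal sketch of how such a pin table is typically consumed in a setup.py
# (the helper name `deps_list` is assumed here, not defined above):
def deps_list(*pkgs):
    """Return the pinned requirement strings for the given package names."""
    return [deps[pkg] for pkg in pkgs]


# e.g. a core dependency list could be assembled as:
#   install_requires = deps_list("importlib_metadata", "filelock", "huggingface-hub", "numpy", "regex", "requests", "Pillow")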
import argparse
import os
import re
import zipfile

import torch

from transformers import AutoTokenizer, GPT2Config


def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :]
    # for compatibility with later versions of NVIDIA Megatron-LM.
    # The inverse operation is performed inside Megatron-LM to read checkpoints:
    # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209
    # If param is the weight tensor of the self-attention block, the returned tensor
    # will have to be transposed one more time to be read by HuggingFace GPT2.
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
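

# Sanity-check sketch for the reordering above (illustrative only): for a
# checkpoint_version >= 2.0 layout with num_splits=3, num_heads=2 and a
# per-head hidden_size of 4, rows are reshuffled but the overall 2-D shape
# (3 * 2 * 4, 5) is preserved.
#
#   dummy = torch.arange(24 * 5, dtype=torch.float32).reshape(24, 5)
#   out = fix_query_key_value_ordering(dummy, 2.0, 3, 2, 4)
#   assert out.shape == dummy.shape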
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))
        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match")
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions)
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For LM head, transformers' wants the matrix to weight embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)")
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.")
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906)
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
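

# Example invocation (flags as defined in the argument parser above; the script
# filename and checkpoint path are placeholders):
#
#   python convert_megatron_gpt2_checkpoint.py \
#       --print-checkpoint-structure \
#       path/to/megatron/checkpoint.zip
#
# The converted config, tokenizer files and pytorch_model.bin are written next
# to the input checkpoint.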
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING)

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one")
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation.")

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
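

# Usage sketch via the standard pipeline factory (the model name is a commonly
# used captioning checkpoint and is an assumption here, not taken from above):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
#   # -> [{'generated_text': '...'}]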
from __future__ import annotations


def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(n: int = 1_000_000) -> int:
    """Return the sum of all numbers below n that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, n):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total


if __name__ == "__main__":
    print(solution(int(str(input().strip()))))
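
# Worked example: below 10, the double-base palindromes are 1 (1), 3 (11),
# 5 (101), 7 (111) and 9 (1001), so solution(10) should return 1+3+5+7+9 = 25.
#
#   assert solution(10) == 25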
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
logger = datasets.utils.logging.get_logger(__name__)
@dataclass
class ParquetConfig(datasets.BuilderConfig):
    """BuilderConfig for Parquet."""

    batch_size: int = 10_000
    columns: Optional[List[str]] = None
    features: Optional[datasets.Features] = None


class Parquet(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = ParquetConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            # Infer features if they are stored in the arrow schema
            if self.info.features is None:
                for file in itertools.chain.from_iterable(files):
                    with open(file, "rb") as f:
                        self.info.features = datasets.Features.from_arrow_schema(pq.read_schema(f))
                    break
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table):
        if self.info.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.info.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.info.features.arrow_schema if self.info.features is not None else None
        if self.info.features is not None and self.config.columns is not None:
            if sorted(field.name for field in schema) != sorted(self.config.columns):
                raise ValueError(
                    f"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'")
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                parquet_file = pq.ParquetFile(f)
                try:
                    for batch_idx, record_batch in enumerate(
                        parquet_file.iter_batches(batch_size=self.config.batch_size, columns=self.config.columns)):
                        pa_table = pa.Table.from_batches([record_batch])
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table)
                except ValueError as e:
                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                    raise
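

# Usage sketch: this packaged builder is what backs datasets' "parquet" loader
# (the file path below is a placeholder):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("parquet", data_files={"train": "path/to/train.parquet"})
#   print(ds["train"].features)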
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1)
        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2")

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")
        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4),
            class_embed_type="projection",
            # the class embeddings are the image embeddings concatenated with their time projection
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True)
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1)
        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy")

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
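

# Minimal inference sketch mirroring the integration test above (same
# checkpoint; assumes a CUDA GPU with enough memory and accelerate installed
# for the CPU-offload call):
#
#   import torch
#   from diffusers import StableUnCLIPPipeline
#
#   pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
#   pipe.enable_sequential_cpu_offload()
#   image = pipe("anime turtle").images[0]
#   image.save("turtle.png")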
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {'facebook/bart-base': BartForConditionalGeneration}
tokenizer_dict = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data.")
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.")
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ))
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True)
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name")
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run")
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO)

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
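
# Example invocation (arguments as defined in parse_args above; the script
# filename and output path are assumptions):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path bart_beam_search.onnx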
from math import asin, atan, cos, radians, sin, sqrt, tan

AXIS_A = 6378137.0
AXIS_B = 6356752.314245
RADIUS = 6378137


def haversine_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """
    Calculate the great-circle distance in meters between two points on Earth,
    given their latitudes/longitudes in degrees and the WGS84 axes above.
    """
    # CONSTANTS per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    phi_1 = atan((1 - flattening) * tan(radians(lat1)))
    phi_2 = atan((1 - flattening) * tan(radians(lat2)))
    lambda_1 = radians(lon1)
    lambda_2 = radians(lon2)
    # Equation
    sin_sq_phi = sin((phi_2 - phi_1) / 2)
    sin_sq_lambda = sin((lambda_2 - lambda_1) / 2)
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    h_value = sqrt(sin_sq_phi + (cos(phi_1) * cos(phi_2) * sin_sq_lambda))
    return 2 * RADIUS * asin(h_value)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
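
# Example usage (coordinates and the resulting figure are approximate; San
# Francisco to Yosemite comes out to roughly 254 km):
#
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   print(f"{haversine_distance(*SAN_FRANCISCO, *YOSEMITE) / 1000:.1f} km")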
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the root of a DFS tree is an articulation point only if it has more than one outgoing edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
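
# The DFS above runs in O(V + E). For the sample graph, removing vertex 2
# separates {0, 1} from the rest, removing 3 isolates 4, and removing 5 cuts
# off the 6-7-8 cycle, so the call should print 2, 3 and 5.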
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "こんにちは",
            "こん",
            "にちは",
            "ばんは",
            "##こん",
            "##にちは",
            "##ばんは",
            "世界",
            "##世界",
            "、",
            "##、",
            "。",
            "##。",
        ]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer_ipadic(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"])

    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"])

    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
        except ModuleNotFoundError:
            return

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"])

    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"])

    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic")
        except RuntimeError:
            # if dict doesn't exist in the system, previous code raises this error.
            return
        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"])

    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"])
    @require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    @require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国", "人", "参政", "権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人", "参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")

        self.assertListEqual(tokenizer.tokenize("外国人参政権"), ["外国人参政権"])

    @require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "])

    @require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "])

    @require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"])
    @require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)

        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)

        tokens_loaded = tokenizer_new.tokenize(text)

        self.assertListEqual(tokens, tokens_loaded)
    @require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])

    @require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])

    @require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"])

    @require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tアップルストアでiPhone8 が  \n 発売された 。  "), ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"])

    @require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()

        self.assertListEqual(
            tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。"), ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こんにちは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは"), ["こん", "##ばんは"])
        self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは"), ["こん", "##ばんは", "[UNK]", "こんにちは"])
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")

        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])

    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("こんにちは"), ["こ", "ん", "に", "ち", "は"])
        self.assertListEqual(tokenizer.tokenize("こんにちほ"), ["こ", "ん", "に", "ち", "[UNK]"])

    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)


class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)
    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False, )
| 44
| 0
|
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
A = True
except (ImportError, ModuleNotFoundError):
A = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
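# A minimal usage sketch (illustrative only, not part of the original module),
# assuming the punkt data downloaded above is available:
#     add_newline_to_end_of_each_sentence("First sentence. Second one.")
#     returns "First sentence.\nSecond one."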
| 160
|
"""simple docstring"""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1
    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1
    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)
    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)
    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r'../image_data/lena.jpg')
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)
    # show result images
    imshow('gaussian filter with 3x3 mask', gaussian3x3)
    imshow('gaussian filter with 5x5 mask', gaussian5x5)
    waitKey()
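# A quick sanity check of the kernel construction (an illustrative snippet, not in
# the original script): the generated kernel is symmetric and peaks at its center.
#     kernel = gen_gaussian_kernel(3, sigma=1)
#     assert kernel.shape == (3, 3) and kernel[1, 1] == kernel.max()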
| 44
| 0
|
"""simple docstring"""
def pigeonhole_sort(a) -> None:
    """Sort a list of integers in place using pigeonhole sort."""
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value
    size = max_val - min_val + 1  # size is difference of max and min values plus one
    # list of pigeonholes of size equal to the variable size
    holes = [0] * size
    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1
    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
def main() -> None:
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print('Sorted order is:', ' '.join(str(x) for x in a))
if __name__ == "__main__":
main()
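# Note on complexity: pigeonhole sort runs in O(n + k) time and O(k) extra space,
# where k = max(a) - min(a) + 1, so it is only practical when the range of values
# is comparable to the number of elements.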
| 91
|
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 44
| 0
|
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
_DESCRIPTION = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mse(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared )
        return {"mse": mse}
| 105
|
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""float""" ),
"""references""": datasets.Value("""float""" ),
} ) , reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"""] , )
    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
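# Pearson's r equals cov(x, y) / (std(x) * std(y)); scipy.stats.pearsonr additionally
# returns a two-sided p-value, which _compute exposes when return_pvalue=True.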
| 44
| 0
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """Return a mapping from utf-8 bytes to printable unicode strings, skipping
    whitespace/control characters that BPE cannot handle."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
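# For example, get_pairs(("l", "o", "w")) returns {("l", "o"), ("o", "w")}: the set
# of adjacent symbol pairs that the BPE merge loop below considers as candidates.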
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)
    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
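    # The merge loop above repeatedly fuses the highest-ranked (lowest merge-index)
    # adjacent pair until no rankable pair remains; results are memoized in self.cache
    # because the same words recur frequently in natural text.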
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8'))  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index):
        return self.decoder.get(index)
    def convert_tokens_to_string(self, tokens):
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)
| 214
|
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)
for row_length in range(3 ,length + 1 ):
for block_length in range(3 ,row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
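# Worked example: for a row of length 7 this block-counting recurrence gives
# solution(7) == 17, the value quoted in Project Euler problem 114, which this
# function appears to solve for the default length of 50.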
| 44
| 0
|
def mf_knapsack(i, wt, val, j):
    """Memoized (memory-function) knapsack over the global dp table f."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j), mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1], )
        f[i][j] = val
    return f[i][j]
def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]
    return dp[n][w_], dp
def knapsack_with_example_solution(w, wt, val):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError(
            'Both the weights and values vectors must be either lists or tuples')
    num_items = len(wt)
    if num_items != len(val):
        msg = (
            'The number of weights must be the same as the number of values.\n'
            f'But got {num_items} weights and {len(val)} values'
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                'All weights must be integers but got weight of '
                f'type {type(wt[i])} at index {i}'
            )
            raise TypeError(msg)
    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)
    return optimal_val, example_optional_set
def _construct_solution(dp, wt, i, j, optimal_set):
    # for item i at maximum weight j to be part of an optimal subset, the optimal
    # value at (i, j) must differ from the optimal value at (i - 1, j)
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)
if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w
    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print("optimal_value = ", optimal_solution)
print("An optimal subset corresponding to the optimal value", optimal_subset)
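# The bottom-up table satisfies dp[i][w_] = max(dp[i-1][w_], val[i-1] + dp[i-1][w_ - wt[i-1]])
# whenever item i fits, so dp[n][w] holds the best achievable value, and
# _construct_solution walks the table backwards to recover one optimal item set.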
| 334
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel
    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")
        super().__init__(*args, **kwargs)
    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt").input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)
        return sequence["answer"]
| 44
| 0
|
"""simple docstring"""
import numpy as np
class Cell:
    """A cell in the world, with a position, a parent, and the A* costs g, h, f."""
    def __init__(self):
        self.position = (0, 0)
        self.parent = None
        self.g = 0
        self.h = 0
        self.f = 0
    def __eq__(self, cell):
        return self.position == cell.position
    def showcell(self):
        print(self.position)
class Gridworld:
    """A small grid world represented as a numpy array."""
    def __init__(self, world_size=(5, 5)):
        self.w = np.zeros(world_size)
        self.world_x_limit = world_size[0]
        self.world_y_limit = world_size[1]
    def show(self):
        print(self.w)
    def get_neigbours(self, cell):
        """Return the in-bounds neighbours of a cell."""
        neughbour_cord = [
            (-1, -1),
            (-1, 0),
            (-1, 1),
            (0, -1),
            (0, 1),
            (1, -1),
            (1, 0),
            (1, 1),
        ]
        current_x = cell.position[0]
        current_y = cell.position[1]
        neighbours = []
        for n in neughbour_cord:
            x = current_x + n[0]
            y = current_y + n[1]
            if 0 <= x < self.world_x_limit and 0 <= y < self.world_y_limit:
                c = Cell()
                c.position = (x, y)
                c.parent = cell
                neighbours.append(c)
        return neighbours
def astar(world, start, goal):
    _open = []
    _closed = []
    _open.append(start)
    while _open:
        min_f = np.argmin([n.f for n in _open])
        current = _open[min_f]
        _closed.append(_open.pop(min_f))
        if current == goal:
            break
        for n in world.get_neigbours(current):
            for c in _closed:
                if c == n:
                    continue
            n.g = current.g + 1
            x1, y1 = n.position
            x2, y2 = goal.position
            n.h = (y2 - y1) ** 2 + (x2 - x1) ** 2
            n.f = n.h + n.g
            for c in _open:
                if c == n and c.f < n.f:
                    continue
            _open.append(n)
    path = []
    while current.parent is not None:
        path.append(current.position)
        current = current.parent
    path.append(current.position)
    return path[::-1]
if __name__ == "__main__":
    world = Gridworld()
    # Start position and goal
    start = Cell()
    start.position = (0, 0)
    goal = Cell()
    goal.position = (4, 4)
    print(f"""path from {start.position} to {goal.position}""")
    s = astar(world, start, goal)
    # Just for visual reasons.
    for i in s:
        world.w[i] = 1
    print(world.w)
| 61
|
"""simple docstring"""
from __future__ import annotations
RADIX = 10
def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 0
|
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial naively, term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
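# Horner's rule needs only n multiplications and n additions for a degree-n
# polynomial by factoring p(x) = c0 + x*(c1 + x*(c2 + ...)), whereas the naive
# evaluate_poly above recomputes x**i for every term.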
| 195
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 0
|
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling tailored to variance-expanding (VE) diffusion models."""
    order = 2
@register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50, ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)
    def scale_model_input(self, sample, timestep=None):
        return sample
    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)
    def add_noise_to_input(self, sample, sigma, generator=None):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True, ):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def step_correct(self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True, ):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative)
        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample)
    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
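# step/step_correct above follow the Heun-style predictor-corrector sampler
# (Algorithm 2) of Karras et al. (2022), "Elucidating the Design Space of
# Diffusion-Based Generative Models", https://arxiv.org/abs/2206.00364.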
| 201
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)
    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)
    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)
    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)
    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)
    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
@unittest.skip(reason="""Model not available yet""" )
    def test_inference_masked_lm(self):
        pass
@slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 44
| 0
|
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"
    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        processor.save_pretrained(
            self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname, self.speaker_embeddings_dict_path, bos_token='(BOS)', eos_token='(EOS)', )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, 'file.npz')
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
def UpperCAmelCase__( self : Any )-> Dict:
lowerCAmelCase__ : int = self.get_tokenizer()
lowerCAmelCase__ : List[Any] = BarkProcessor(tokenizer=a__ )
lowerCAmelCase__ : Dict = processor(text=self.input_string )
lowerCAmelCase__ : Tuple = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 131
|
"""simple docstring"""
import numpy as np
import qiskit
def bbaa(key_len: int = 8, seed: int | None = None) -> str:
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bbaa_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_state):
        if alice_state[index] == 1:
            bbaa_circ.x(index)
        if alice_basis[index] == 1:
            bbaa_circ.h(index)
    bbaa_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bbaa_circ.h(index)

    bbaa_circ.barrier()
    bbaa_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bbaa_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bbaa_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        [
            result_bit
            for alice_basis_bit, bob_basis_bit, result_bit in zip(
                alice_basis, bob_basis, result
            )
            if alice_basis_bit == bob_basis_bit
        ]
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")

    return key
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
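

# Hedged sketch (not part of the original algorithm file): the basis-sifting
# step of BB84 can be checked classically. With ideal, noise-free transmission,
# Bob's measurement matches Alice's prepared state whenever their bases agree,
# so keeping only the matching positions yields the shared raw key. The helper
# name below is illustrative.
def _sift_classically(alice_basis, bob_basis, alice_state) -> str:
    # Keep only positions where both parties chose the same measurement basis.
    return "".join(
        str(state_bit)
        for a_bit, b_bit, state_bit in zip(alice_basis, bob_basis, alice_state)
        if a_bit == b_bit
    )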
'''simple docstring'''
def is_sum_subset(arr: list, required_sum: int) -> bool:
    arr_len = len(arr)
    # subset[i][j] is True when some subset of the first i elements sums to j.
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]

    # for each arr value, a sum of zero(0) can be formed by not taking any element
    # hence True/1
    for i in range(arr_len + 1):
        subset[i][0] = True

    # sum is not zero and set is empty then false
    for j in range(1, required_sum + 1):
        subset[0][j] = False

    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            else:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]

    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
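

# Hedged variant (illustrative, not in the original file): the 2-D table above
# only ever reads the previous row, so it can be collapsed to a single row of
# size required_sum + 1. Iterating j downwards prevents reusing an element.
def is_sum_subset_1d(arr: list, required_sum: int) -> bool:
    reachable = [False] * (required_sum + 1)
    reachable[0] = True  # the empty subset sums to zero
    for value in arr:
        for j in range(required_sum, value - 1, -1):
            reachable[j] = reachable[j] or reachable[j - value]
    return reachable[required_sum]


# Example: [3, 34, 4, 12, 5, 2] contains the subset {4, 5}, which sums to 9.
assert is_sum_subset_1d([3, 34, 4, 12, 5, 2], 9) is True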
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
NON_ALPHA = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
MIN_NUM_TOKENS = 10
NUM_PERM = 256


def get_min_hash(tokens: List[str]) -> Optional[MinHash]:
    """Compute the MinHash of a code snippet given as a list of tokens."""
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code: str) -> Set[str]:
    """Tokenize a code snippet on every character that is not alphanumeric or underscore."""
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}


class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key: Tuple, min_hash: MinHash) -> None:
        """Add a key to the index and link it to an existing cluster if it is a near-duplicate."""
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)


def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator: Type[Dataset]):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator: Type[Dataset], jaccard_threshold: float):
    """Find duplicate clusters in the dataset: index the MinHashes, then cluster close matches."""
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    """Compute the Jaccard similarity of the token sets of two code snippets."""
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    """Reduce a cluster to its list of 'extreme' (mutually dissimilar) elements."""
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    """Call _find_cluster_extremes_shared on each cluster, sharing the dataset across worker processes."""
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset: Type[Dataset], jaccard_threshold: float = 0.85) -> Tuple[Type[Dataset], List[List[Dict]]]:
    """Deduplicate the dataset and return the filtered dataset plus the duplicate clusters."""
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
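

# Hedged usage sketch (illustrative): exercising the token-level Jaccard
# similarity defined above on two made-up snippets. The exact value depends
# only on the NON_ALPHA tokenizer; identical token sets give 1.0.
if __name__ == "__main__":
    snippet_a = "def add(a, b):\n    return a + b"
    snippet_b = "def add(x, y):\n    return x + y"
    print(f"Jaccard similarity: {jaccard_similarity(snippet_a, snippet_b):.2f}")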
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the arguments of the command line of this script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
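

# Hedged usage sketch: the launcher is meant to be invoked from the command
# line, with everything after the script path forwarded untouched, e.g.
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased --do_train
#
# (the training script and its arguments here are illustrative).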
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}


class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
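

# Hedged usage sketch (illustrative): with the defaults above, the derived
# hidden size is embed_dim * 2 ** (num_layers - 1) = 96 * 2 ** 3 = 768.
if __name__ == "__main__":
    config = Swinv2Config()
    print(config.model_type, config.hidden_size)  # swinv2 768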
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
__a : str = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__a : List[str] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(a__ , exist_ok=a__ )
__a : int = os.path.join(a__ , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
__a : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
__a : str = dict(zip(a__ , range(len(a__ ) ) ) )
__a : Optional[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
__a : List[str] = {"""unk_token""": """<unk>"""}
__a : Dict = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(a__ , exist_ok=a__ )
__a : Dict = os.path.join(a__ , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
__a : Tuple = os.path.join(a__ , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(a__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(a__ ) )
    def get_dpr_tokenizer(self):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_dpr_ctx_encoder_tokenizer(self):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
    def get_bart_tokenizer(self):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
    def tearDown(self):
shutil.rmtree(self.tmpdirname )
    def get_dummy_dataset(self):
__a : Optional[Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
__a : Union[str, Any] = self.get_dummy_dataset()
__a : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__a : List[Any] = dataset
__a : Optional[Any] = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
__a : Optional[Any] = self.get_dummy_dataset()
__a : Union[str, Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
__a : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
__a : List[Any] = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
__a : Optional[int] = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
__a : int = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , a__ ) , )
return retriever
    def get_dummy_legacy_index_retriever(self):
__a : Optional[Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
__a : Optional[Any] = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
__a : Union[str, Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
__a : Union[str, Any] = {sample["""id"""]: [sample["""text"""], sample["""title"""]] for sample in dataset}
pickle.dump(a__ , open(a__ , '''wb''' ) )
__a : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
__a : str = RagRetriever(
a__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
__a : Tuple = 1
__a : Dict = self.get_dummy_canonical_hf_index_retriever()
__a : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Union[str, Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
__a : Optional[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
__a : Optional[int] = self.get_dummy_dataset()
retriever.save_pretrained(a__ )
__a : Dict = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
__a : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : int = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self):
__a : int = 1
__a : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
__a : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
__a : Any = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
__a : Tuple = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
__a : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : Any = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
__a : Any = 1
__a : List[str] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
__a : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[str] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , a__ )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
__a : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
__a : List[Any] = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
__a : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[Any] = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self):
__a : int = 1
__a : Tuple = self.get_dummy_legacy_index_retriever()
__a : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[Any] = retriever.retrieve(a__ , n_docs=a__ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(a__ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , a__ )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
__a : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(a__ )
__a : Tuple = RagRetriever.from_pretrained(a__ )
self.assertIsInstance(a__ , a__ )
__a : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[str] = retriever.retrieve(a__ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
import torch
__a : Tuple = 1
__a : int = self.get_dummy_canonical_hf_index_retriever()
__a : Optional[Any] = [[5, 7], [10, 11]]
__a : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[str] = retriever(a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ )
__a : Optional[int] = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a__ , a__ )
self.assertIsInstance(a__ , a__ )
self.assertIsInstance(a__ , np.ndarray )
__a : int = retriever(
a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ , return_tensors='''pt''' , )
__a : Union[str, Any] = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(a__ , torch.Tensor )
self.assertIsInstance(a__ , torch.Tensor )
self.assertIsInstance(a__ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
__a : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
__a : Optional[int] = 1
__a : int = self.get_dummy_custom_hf_index_retriever(from_disk=a__ )
retriever.set_ctx_encoder_tokenizer(a__ )
__a : Any = [[5, 7], [10, 11]]
__a : Any = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
__a : List[Any] = retriever(a__ , a__ , prefix=retriever.config.generator.prefix , n_docs=a__ )
self.assertEqual(
len(a__ ) , 6 ) # check whether the retriever output consist of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , a__ ) # check for doc token related keys in dictionary.
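

# Hedged note on the retrieve() contract exercised above: for a batch of B
# query vectors and n_docs requested documents, the retriever is expected to
# return doc ids of shape (B, n_docs) and doc embeddings of shape
# (B, n_docs, vector_size); the shape assertions in these tests rely on
# exactly that layout.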
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
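

# Hedged usage sketch (illustrative): instantiating the configuration with
# defaults and inspecting the backbone stage layout derived above.
if __name__ == "__main__":
    config = BitConfig()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']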
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of a pre-softmax logit tensor."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
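

# Hedged check (illustrative): the closed form above is the Shannon entropy of
# softmax(x), H = log(sum exp x_i) - sum(x_i * exp x_i) / sum(exp x_i), so for
# uniform logits it reduces to log(num_classes):
#   entropy(torch.zeros(1, 4))  # -> tensor([1.3863]) == log(4)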
class DeeBertEncoder(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : int = config.output_attentions
_lowerCAmelCase : Any = config.output_hidden_states
_lowerCAmelCase : List[Any] = nn.ModuleList([BertLayer(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : Any = nn.ModuleList([BertHighway(a__ ) for _ in range(config.num_hidden_layers )] )
_lowerCAmelCase : str = [-1 for _ in range(config.num_hidden_layers )]
def __A ( self , a__ ):
if (type(a__ ) is float) or (type(a__ ) is int):
for i in range(len(self.early_exit_entropy ) ):
_lowerCAmelCase : Tuple = x
else:
_lowerCAmelCase : Optional[int] = x
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def __A ( self , a__ , a__=None , a__=None , a__=None , a__=None , ):
_lowerCAmelCase : Any = ()
_lowerCAmelCase : Optional[int] = ()
_lowerCAmelCase : List[Any] = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
_lowerCAmelCase : str = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[str] = layer_module(
a__ , a__ , head_mask[i] , a__ , a__ )
_lowerCAmelCase : Union[str, Any] = layer_outputs[0]
if self.output_attentions:
_lowerCAmelCase : Dict = all_attentions + (layer_outputs[1],)
_lowerCAmelCase : Optional[int] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : Union[str, Any] = current_outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Optional[int] = current_outputs + (all_attentions,)
_lowerCAmelCase : Optional[Any] = self.highway[i](a__ )
# logits, pooled_output
if not self.training:
_lowerCAmelCase : Tuple = highway_exit[0]
_lowerCAmelCase : Any = entropy(a__ )
_lowerCAmelCase : Optional[Any] = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
_lowerCAmelCase : Union[str, Any] = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
_lowerCAmelCase : List[str] = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(a__ , i + 1 )
else:
_lowerCAmelCase : Dict = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
_lowerCAmelCase : List[Any] = all_hidden_states + (hidden_states,)
_lowerCAmelCase : List[Any] = (hidden_states,)
if self.output_hidden_states:
_lowerCAmelCase : List[str] = outputs + (all_hidden_states,)
if self.output_attentions:
_lowerCAmelCase : Any = outputs + (all_attentions,)
_lowerCAmelCase : Optional[int] = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"The Bert Model transformer with early exiting (DeeBERT). " , SCREAMING_SNAKE_CASE_ , )
class DeeBertModel(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : Any = config
_lowerCAmelCase : Tuple = BertEmbeddings(a__ )
_lowerCAmelCase : Tuple = DeeBertEncoder(a__ )
_lowerCAmelCase : List[str] = BertPooler(a__ )
self.init_weights()
def __A ( self ):
self.encoder.init_highway_pooler(self.pooler )
def __A ( self ):
return self.embeddings.word_embeddings
def __A ( self , a__ ):
_lowerCAmelCase : Dict = value
def __A ( self , a__ ):
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(a__ )
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("""You cannot specify both input_ids and inputs_embeds at the same time""" )
elif input_ids is not None:
_lowerCAmelCase : Any = input_ids.size()
elif inputs_embeds is not None:
_lowerCAmelCase : List[str] = inputs_embeds.size()[:-1]
else:
raise ValueError("""You have to specify either input_ids or inputs_embeds""" )
_lowerCAmelCase : str = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
_lowerCAmelCase : List[Any] = torch.ones(a__ , device=a__ )
if encoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = torch.ones(a__ , device=a__ )
if token_type_ids is None:
_lowerCAmelCase : Dict = torch.zeros(a__ , dtype=torch.long , device=a__ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
_lowerCAmelCase : torch.Tensor = self.get_extended_attention_mask(a__ , a__ , a__ )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
_lowerCAmelCase : Dict = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
_lowerCAmelCase : Tuple = encoder_attention_mask[:, None, None, :]
_lowerCAmelCase : Union[str, Any] = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
_lowerCAmelCase : Optional[Any] = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
_lowerCAmelCase : Optional[int] = self.get_head_mask(a__ , self.config.num_hidden_layers )
_lowerCAmelCase : Dict = self.embeddings(
input_ids=a__ , position_ids=a__ , token_type_ids=a__ , inputs_embeds=a__ )
_lowerCAmelCase : Union[str, Any] = self.encoder(
a__ , attention_mask=a__ , head_mask=a__ , encoder_hidden_states=a__ , encoder_attention_mask=a__ , )
_lowerCAmelCase : Dict = encoder_outputs[0]
_lowerCAmelCase : Union[str, Any] = self.pooler(a__ )
_lowerCAmelCase : Dict = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
def __init__( self , a__ , a__ ):
_lowerCAmelCase : str = message
_lowerCAmelCase : str = exit_layer # start from 1!
class BertHighway(nn.Module):
def __init__( self , a__ ):
super().__init__()
_lowerCAmelCase : Any = BertPooler(a__ )
_lowerCAmelCase : str = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : Union[str, Any] = nn.Linear(config.hidden_size , config.num_labels )
def __A ( self , a__ ):
# Pooler
_lowerCAmelCase : Tuple = encoder_outputs[0]
_lowerCAmelCase : int = self.pooler(a__ )
# "return" pooler_output
# BertModel
_lowerCAmelCase : Union[str, Any] = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
_lowerCAmelCase : Optional[int] = bmodel_output[1]
_lowerCAmelCase : Tuple = self.dropout(a__ )
_lowerCAmelCase : Dict = self.classifier(a__ )
return logits, pooled_output
@add_start_docstrings(
"Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. " , SCREAMING_SNAKE_CASE_ , )
class DeeBertForSequenceClassification(BertPreTrainedModel):
def __init__( self , a__ ):
super().__init__(a__ )
_lowerCAmelCase : List[str] = config.num_labels
_lowerCAmelCase : Optional[Any] = config.num_hidden_layers
_lowerCAmelCase : str = DeeBertModel(a__ )
_lowerCAmelCase : Tuple = nn.Dropout(config.hidden_dropout_prob )
_lowerCAmelCase : List[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(a__ )
def __A ( self , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=None , a__=-1 , a__=False , ):
_lowerCAmelCase : Dict = self.num_layers
try:
_lowerCAmelCase : str = self.bert(
a__ , attention_mask=a__ , token_type_ids=a__ , position_ids=a__ , head_mask=a__ , inputs_embeds=a__ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
_lowerCAmelCase : Any = outputs[1]
_lowerCAmelCase : Optional[int] = self.dropout(a__ )
_lowerCAmelCase : List[str] = self.classifier(a__ )
_lowerCAmelCase : Union[str, Any] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
_lowerCAmelCase : Tuple = e.message
_lowerCAmelCase : int = e.exit_layer
_lowerCAmelCase : Union[str, Any] = outputs[0]
if not self.training:
_lowerCAmelCase : Tuple = entropy(a__ )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Optional[Any] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : Tuple = MSELoss()
_lowerCAmelCase : int = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Any = CrossEntropyLoss()
_lowerCAmelCase : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
_lowerCAmelCase : Optional[Any] = []
for highway_exit in outputs[-1]:
_lowerCAmelCase : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(a__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
_lowerCAmelCase : List[Any] = MSELoss()
_lowerCAmelCase : int = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
_lowerCAmelCase : Optional[int] = CrossEntropyLoss()
_lowerCAmelCase : List[Any] = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(a__ )
if train_highway:
_lowerCAmelCase : List[Any] = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
_lowerCAmelCase : Any = (loss,) + outputs
if not self.training:
_lowerCAmelCase : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
_lowerCAmelCase : Dict = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
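

# Hedged note on the control flow above: during inference the encoder raises
# HighwayException as soon as a layer's prediction entropy drops below its
# configured threshold; the try/except in the classifier's forward catches it
# and recovers the early-exit logits instead of running the remaining layers.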
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
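

# Hedged usage sketch (requires network access; dataset names taken from the
# parametrizations above):
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # ['train', 'validation']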
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository, served from the repo's metadata."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
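

# Hedged usage sketch (illustrative names): since the filesystem is backed by
# a DatasetInfo object, directory listings work purely from repo metadata,
# without downloading any files:
#   fs = HfFileSystem(repo_info=some_dataset_info, token=None)
#   fs.ls("data", detail=False)  # file and sub-directory names under "data"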
fast27_timesteps = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
smart27_timesteps = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
smart50_timesteps = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
smart100_timesteps = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
smart185_timesteps = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
super27_timesteps = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
super40_timesteps = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
super100_timesteps = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
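

# Hedged sketch (illustrative): evenly spaced schedules like several of the
# lists above can be generated instead of hard-coded, descending from the last
# training timestep to 0.
import numpy as np


def _uniform_timesteps(num_steps: int, num_train_timesteps: int = 1000) -> list:
    return np.linspace(num_train_timesteps - 1, 0, num_steps).round().astype(int).tolist()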
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase : str = np.array(
[0.6_1_4_7_4_9_4_3, 0.6_0_7_3_5_3_9, 0.4_3_3_0_8_5_4_4, 0.5_9_2_8_2_6_9, 0.4_7_4_9_3_5_9_5, 0.4_6_7_5_5_9_7_3, 0.4_6_1_3_8_3_8, 0.4_5_3_6_8_7_9_7, 0.5_0_1_1_9_2_3_3] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
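# A minimal sketch of the same img2img flow outside the test harness, reusing
# the imports at the top of this file. The checkpoint ids mirror the slow test
# above; the fp16 dtype and the local "cat.png" input are assumptions, not
# part of the original suite.
def _demo_kandinsky_img2img(device="cuda"):
    prior = KandinskyPriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
    ).to(device)
    pipe = KandinskyImgaImgPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
    ).to(device)
    prompt = "A red cartoon frog, 4k"
    # The prior turns the text prompt into image embeddings for the decoder.
    image_emb, zero_image_emb = prior(prompt, num_inference_steps=5).to_tuple()
    output = pipe(
        prompt,
        image=Image.open("cat.png"),  # assumed local input image
        image_embeds=image_emb,
        negative_image_embeds=zero_image_emb,
        strength=0.2,
        num_inference_steps=100,
        output_type="np",
    )
    return output.images[0]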
def is_arithmetic_series(series: list) -> bool:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True
def arithmetic_mean(series: list) -> float:
    """simple docstring"""
    if not isinstance(series, list):
        raise ValueError('Input series is not valid, valid series - [2, 4, 6]')
    if len(series) == 0:
        raise ValueError('Input list must be a non empty list')
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
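    # A few plain sanity checks for the helpers above; the example series
    # mirror the one quoted in the error messages.
    assert is_arithmetic_series([2, 4, 6])
    assert not is_arithmetic_series([3, 6, 12, 24])
    assert arithmetic_mean([2, 4, 6]) == 4.0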
"""simple docstring"""
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks))
def get_device_map(n_layers: int, devices: list) -> dict:
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
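# Usage sketch: split 12 attention blocks evenly across two devices, then
# validate the resulting map. The device ids are illustrative.
if __name__ == "__main__":
    device_map = get_device_map(12, devices=[0, 1])
    # -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
    assert_device_map(device_map, num_blocks=12)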
"""simple docstring"""
import contextlib
import csv
import json
import os
import sqlitea
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def __a ( ):
UpperCAmelCase_ : List[Any] = 10
UpperCAmelCase_ : List[str] = datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
UpperCAmelCase_ : Optional[Any] = datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(_lowerCamelCase ) ),
}, features=_lowerCamelCase, )
return dataset
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=_lowerCamelCase )
return filename
# FILE_CONTENT + files
_a = '\\n Text data.\n Second line of data.'
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = tmp_path_factory.mktemp("data" ) / """file.txt"""
UpperCAmelCase_ : Union[str, Any] = FILE_CONTENT
with open(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase )
return filename
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
import bza
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("data" ) / """file.txt.bz2"""
UpperCAmelCase_ : Optional[Any] = bytes(_lowerCamelCase, "utf-8" )
with bza.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
import gzip
UpperCAmelCase_ : Dict = str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
UpperCAmelCase_ : Dict = bytes(_lowerCamelCase, "utf-8" )
with gzip.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
if datasets.config.LZ4_AVAILABLE:
import lza.frame
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / """file.txt.lz4"""
UpperCAmelCase_ : str = bytes(_lowerCamelCase, "utf-8" )
with lza.frame.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
if datasets.config.PY7ZR_AVAILABLE:
import pyazr
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / """file.txt.7z"""
with pyazr.SevenZipFile(_lowerCamelCase, "w" ) as archive:
archive.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import tarfile
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / """file.txt.tar"""
with tarfile.TarFile(_lowerCamelCase, "w" ) as f:
f.add(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
import lzma
UpperCAmelCase_ : Union[str, Any] = tmp_path_factory.mktemp("data" ) / """file.txt.xz"""
UpperCAmelCase_ : Optional[int] = bytes(_lowerCamelCase, "utf-8" )
with lzma.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import zipfile
UpperCAmelCase_ : List[str] = tmp_path_factory.mktemp("data" ) / """file.txt.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / """file.txt.zst"""
UpperCAmelCase_ : Tuple = bytes(_lowerCamelCase, "utf-8" )
with zstd.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """file.xml"""
UpperCAmelCase_ : Any = textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase )
return filename
_a = [
{'col_1': '0', 'col_2': 0, 'col_3': 0.0},
{'col_1': '1', 'col_2': 1, 'col_3': 1.0},
{'col_1': '2', 'col_2': 2, 'col_3': 2.0},
{'col_1': '3', 'col_2': 3, 'col_3': 3.0},
]
_a = [
{'col_1': '4', 'col_2': 4, 'col_3': 4.0},
{'col_1': '5', 'col_2': 5, 'col_3': 5.0},
]
_a = {
'col_1': ['0', '1', '2', '3'],
'col_2': [0, 1, 2, 3],
'col_3': [0.0, 1.0, 2.0, 3.0],
}
_a = [
{'col_3': 0.0, 'col_1': '0', 'col_2': 0},
{'col_3': 1.0, 'col_1': '1', 'col_2': 1},
]
_a = [
{'col_1': 's0', 'col_2': 0, 'col_3': 0.0},
{'col_1': 's1', 'col_2': 1, 'col_3': 1.0},
{'col_1': 's2', 'col_2': 2, 'col_3': 2.0},
{'col_1': 's3', 'col_2': 3, 'col_3': 3.0},
]
@pytest.fixture(scope="session" )
def __a ( ):
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Union[str, Any] = datasets.Dataset.from_dict(_lowerCamelCase )
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
with contextlib.closing(sqlitea.connect(_lowerCamelCase ) ) as con:
UpperCAmelCase_ : int = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(_lowerCamelCase, "w", newline="" ) as f:
UpperCAmelCase_ : str = csv.DictWriter(_lowerCamelCase, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(_lowerCamelCase, "w", newline="" ) as f:
UpperCAmelCase_ : Tuple = csv.DictWriter(_lowerCamelCase, fieldnames=["col_1", "col_2", "col_3"] )
writer.writeheader()
for item in DATA:
writer.writerow(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import bza
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """dataset.csv.bz2"""
with open(_lowerCamelCase, "rb" ) as f:
UpperCAmelCase_ : List[str] = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bza.open(_lowerCamelCase, "wb" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """dataset.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(csv_path.replace(".csv", ".CSV" ) ) )
f.write(_lowerCamelCase, arcname=os.path.basename(csva_path.replace(".csv", ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.csv.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
UpperCAmelCase_ : Any = pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(_lowerCamelCase, "wb" ) as f:
UpperCAmelCase_ : List[str] = pq.ParquetWriter(_lowerCamelCase, schema=_lowerCamelCase )
UpperCAmelCase_ : Dict = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(_lowerCamelCase ) )] for k in DATA[0]}, schema=_lowerCamelCase )
writer.write_table(_lowerCamelCase )
writer.close()
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase_ : List[Any] = {"""data""": DATA}
with open(_lowerCamelCase, "w" ) as f:
json.dump(_lowerCamelCase, _lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
UpperCAmelCase_ : List[str] = {"""data""": DATA_DICT_OF_LISTS}
with open(_lowerCamelCase, "w" ) as f:
json.dump(_lowerCamelCase, _lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(_lowerCamelCase, "w" ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(_lowerCamelCase, "w" ) as f:
for item in DATA:
f.write(json.dumps(_lowerCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(_lowerCamelCase, "w" ) as f:
for item in DATA_312:
f.write(json.dumps(_lowerCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(_lowerCamelCase, "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(_lowerCamelCase ) + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import gzip
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(_lowerCamelCase, "rb" ) as orig_file:
with gzip.open(_lowerCamelCase, "wb" ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
import gzip
UpperCAmelCase_ : str = str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(_lowerCamelCase, "rb" ) as orig_file:
with gzip.open(_lowerCamelCase, "wb" ) as zipped_file:
zipped_file.writelines(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.join("nested", os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Dict = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.jsonl.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : str = tmp_path_factory.mktemp("data" ) / """dataset.jsonl.tar"""
with tarfile.TarFile(_lowerCamelCase, "w" ) as f:
f.add(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
f.add(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """dataset_nested.jsonl.tar"""
with tarfile.TarFile(_lowerCamelCase, "w" ) as f:
f.add(_lowerCamelCase, arcname=os.path.join("nested", os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Any = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : Any = str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(_lowerCamelCase, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[Any] = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : Optional[Any] = str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(_lowerCamelCase, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : int = ["""0""", """1""", """2""", """3"""]
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("data" ) / """dataset.abc"""
with open(_lowerCamelCase, "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """dataset.text.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Any = tmp_path_factory.mktemp("data" ) / """dataset_with_dir.text.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
f.write(_lowerCamelCase, arcname=os.path.join("main_dir", os.path.basename(_lowerCamelCase ) ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = tmp_path_factory.mktemp("data" ) / """dataset.ext.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename("unsupported.ext" ) )
f.write(_lowerCamelCase, arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : List[str] = """\n""".join(["First", "Second\u2029with Unicode new line", "Third"] )
UpperCAmelCase_ : Tuple = str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(_lowerCamelCase, "w", encoding="utf-8" ) as f:
f.write(_lowerCamelCase )
return path
@pytest.fixture(scope="session" )
def __a ( ):
return os.path.join("tests", "features", "data", "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def __a ( ):
return os.path.join("tests", "features", "data", "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("data" ) / """dataset.img.zip"""
with zipfile.ZipFile(_lowerCamelCase, "w" ) as f:
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ) )
f.write(_lowerCamelCase, arcname=os.path.basename(_lowerCamelCase ).replace(".jpg", "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def __a ( __lowerCamelCase ):
UpperCAmelCase_ : Tuple = tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt", "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt", "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt", "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
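# Sketch of how a test module consumes one of these session-scoped fixtures:
# pytest injects the generated path by fixture name. The fixture names were
# rewritten to generic identifiers above, so the name below is illustrative.
#
#   def test_csv_roundtrip(csv_path):
#       ds = datasets.load_dataset("csv", data_files=csv_path, split="train")
#       assert ds.column_names == ["col_1", "col_2", "col_3"]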
"""simple docstring"""
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
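# Sketch of how a pin table like this is typically consumed when building
# install_requires / extras in setup.py. The helper below is illustrative;
# `_a` is simply the name the table is bound to above.
def deps_list(*pkgs):
    # Look up the pinned requirement string for each bare package name.
    return [_a[pkg] for pkg in pkgs]
# e.g. deps_list("torch", "numpy") -> ['torch>=1.4', 'numpy']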
import math
def real_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * power_factor
def reactive_power(apparent_power: float, power_factor: float) -> float:
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError('power_factor must be a valid float value between -1 and 1.')
    return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
doctest.testmod()
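    # Worked example: a 100 VA load at power factor 0.8 draws 80 W of real
    # power and about 60 var of reactive power (sqrt(1 - 0.8**2) * 100).
    assert real_power(100, 0.8) == 80.0
    assert abs(reactive_power(100, 0.8) - 60.0) < 1e-9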
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
        # pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
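# Minimal usage sketch for this pipeline; the checkpoint id and image path are
# illustrative, not mandated by the code above.
#
#   from transformers import pipeline
#   captioner = pipeline("image-to-text", model="microsoft/git-base-coco")
#   captioner("cat.png", max_new_tokens=20)
#   # -> [{'generated_text': '...'}]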
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # Fitness: the number of characters that already match the target, position-wise.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)
    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
    # Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)
        # Random population created. Now it's time to evaluate.
        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])
        # Print the best result every 10 generation.
        # Just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population:{total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}")
        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings in
            # a far fewer generations.
            if len(population) > N_POPULATION:
                break
if __name__ == "__main__":
    target_str = (
        'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'
    )
    genes_list = list(
        ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'
        'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
    )
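# Worked example of the fitness measure: with target "abc", evaluate("abd", "abc")
# matches two of three positions and returns ("abd", 2.0); the score is later
# normalized to 2/3 when divided by the target length inside basic().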
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
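# Usage sketch: `load_dataset("parquet", ...)` routes through this builder, so
# config kwargs such as `columns` map onto ParquetConfig above. The file name
# is illustrative.
#
#   from datasets import load_dataset
#   ds = load_dataset("parquet", data_files="data.parquet",
#                     columns=["col_1"], split="train")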
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'vocab_file': 'spiece.model'}
lowerCamelCase = {
'vocab_file': {
't5-small': 'https://huggingface.co/t5-small/resolve/main/spiece.model',
't5-base': 'https://huggingface.co/t5-base/resolve/main/spiece.model',
't5-large': 'https://huggingface.co/t5-large/resolve/main/spiece.model',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/spiece.model',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/spiece.model',
}
}
# TODO(PVP) - this should be removed in Transformers v5
lowerCamelCase = {
't5-small': 512,
't5-base': 512,
't5-large': 512,
't5-3b': 512,
't5-11b': 512,
}
lowerCamelCase = '▁'
class _a ( SCREAMING_SNAKE_CASE_):
_a : Tuple = VOCAB_FILES_NAMES
_a : Any = PRETRAINED_VOCAB_FILES_MAP
_a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a : Optional[int] = ["input_ids", "attention_mask"]
def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int]="</s>" , _SCREAMING_SNAKE_CASE : int="<unk>" , _SCREAMING_SNAKE_CASE : Optional[int]="<pad>" , _SCREAMING_SNAKE_CASE : Dict=100 , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Optional[Any] = None , _SCREAMING_SNAKE_CASE : Optional[int]=True , **_SCREAMING_SNAKE_CASE : List[Any] , )-> Dict:
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
lowerCAmelCase__ : List[Any] = [F'<extra_id_{i}>' for i in range(a__ )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra_id special tokens
lowerCAmelCase__ : Union[str, Any] = len(set(filter(lambda _SCREAMING_SNAKE_CASE : bool('''extra_id''' in str(a__ ) ) , a__ ) ) )
if extra_tokens != extra_ids:
raise ValueError(
F'Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
if legacy:
logger.warning_once(
F'You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to'
''' read the related pull request available at https://github.com/huggingface/transformers/pull/24565''' )
lowerCAmelCase__ : str = legacy
lowerCAmelCase__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=a__ , unk_token=a__ , pad_token=a__ , extra_ids=a__ , additional_special_tokens=a__ , sp_model_kwargs=self.sp_model_kwargs , legacy=a__ , **a__ , )
lowerCAmelCase__ : Any = vocab_file
lowerCAmelCase__ : Any = extra_ids
lowerCAmelCase__ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(a__ )
@staticmethod
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[Any] )-> List[str]:
if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
lowerCAmelCase__ : List[Any] = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
F' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
F' {pretrained_model_name_or_path} automatically truncating your input to'
F' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'
F' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , a__ , )
return max_model_length
@property
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
return self.sp_model.get_piece_size() + self._extra_ids
def UpperCAmelCase__( self : Any )-> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = {self.convert_ids_to_tokens(a__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int = None , _SCREAMING_SNAKE_CASE : int = False )-> List[str]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a__ , token_ids_a=a__ , already_has_special_tokens=a__ )
# normal case: some special tokens
if token_ids_a is None:
return ([0] * len(a__ )) + [1]
return ([0] * len(a__ )) + [1] + ([0] * len(a__ )) + [1]
def UpperCAmelCase__( self : Any )-> int:
return list(
set(filter(lambda _SCREAMING_SNAKE_CASE : bool(re.search(r'''<extra_id_\d+>''' , a__ ) ) is not None , self.additional_special_tokens ) ) )
def UpperCAmelCase__( self : Tuple )-> Union[str, Any]:
return [self._convert_token_to_id(a__ ) for token in self.get_sentinel_tokens()]
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : Any )-> Tuple:
if len(a__ ) > 0 and token_ids[-1] == self.eos_token_id:
warnings.warn(
F'This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated'
''' eos tokens being added.''' )
return token_ids
else:
return token_ids + [self.eos_token_id]
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] = None )-> List[Any]:
lowerCAmelCase__ : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def UpperCAmelCase__( self : List[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] = None )-> str:
lowerCAmelCase__ : Tuple = self._add_eos_if_not_present(a__ )
if token_ids_a is None:
return token_ids_a
else:
lowerCAmelCase__ : Optional[Any] = self._add_eos_if_not_present(a__ )
return token_ids_a + token_ids_a
def __getstate__( self : Optional[int] )-> Union[str, Any]:
lowerCAmelCase__ : Tuple = self.__dict__.copy()
lowerCAmelCase__ : List[str] = None
return state
def __setstate__( self : Tuple , _SCREAMING_SNAKE_CASE : int )-> List[str]:
lowerCAmelCase__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : Any = {}
lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : List[Any] )-> List[Any]:
# Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
# the beginning of the text
if not self.legacy:
lowerCAmelCase__ : Union[str, Any] = SPIECE_UNDERLINE + text.replace(a__ , ''' ''' )
return super().tokenize(a__ , **a__ )
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : Dict )-> Optional[Any]:
if not self.legacy:
lowerCAmelCase__ : int = text.startswith(a__ )
if is_first:
lowerCAmelCase__ : Tuple = text[1:]
lowerCAmelCase__ : List[Any] = self.sp_model.encode(a__ , out_type=a__ )
if not self.legacy and not is_first and not text.startswith(''' ''' ) and tokens[0].startswith(a__ ):
lowerCAmelCase__ : Optional[int] = ([tokens[0][1:]] if len(tokens[0] ) > 1 else []) + tokens[1:]
return tokens
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] )-> List[Any]:
if token.startswith('''<extra_id_''' ):
lowerCAmelCase__ : Any = re.match(r'''<extra_id_(\d+)>''' , a__ )
lowerCAmelCase__ : Dict = int(match.group(1 ) )
return self.vocab_size - num - 1
return self.sp_model.piece_to_id(a__ )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Tuple )-> Union[str, Any]:
if index < self.sp_model.get_piece_size():
lowerCAmelCase__ : Union[str, Any] = self.sp_model.IdToPiece(a__ )
else:
lowerCAmelCase__ : Union[str, Any] = F'<extra_id_{self.vocab_size - 1 - index}>'
return token
def UpperCAmelCase__( self : Optional[int] , _SCREAMING_SNAKE_CASE : int )-> List[Any]:
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[str] = """"""
lowerCAmelCase__ : Tuple = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a__ ) + token
lowerCAmelCase__ : Optional[int] = True
lowerCAmelCase__ : Union[str, Any] = []
else:
current_sub_tokens.append(a__ )
lowerCAmelCase__ : Tuple = False
out_string += self.sp_model.decode(a__ )
return out_string.strip()
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] = None )-> List[str]:
if not os.path.isdir(a__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : Optional[Any] = os.path.join(
a__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , a__ )
elif not os.path.isfile(self.vocab_file ):
with open(a__ , '''wb''' ) as fi:
lowerCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(a__ )
return (out_vocab_file,)
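# Usage sketch for the sentinel-token scheme above (the public class name is
# T5Tokenizer; the checkpoint id is illustrative). With the default
# extra_ids=100, "<extra_id_0>" resolves to vocab_size - 1, per the
# token-to-id conversion defined above.
#
#   tok = T5Tokenizer.from_pretrained("t5-small")
#   tok.convert_tokens_to_ids("<extra_id_0>") == tok.vocab_size - 1  # True
#   tok("Summarize: <extra_id_0>").input_ids  # ends with the EOS id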
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def SCREAMING_SNAKE_CASE ( ) -> int:
_lowerCAmelCase : int = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" ,type=_lowerCamelCase ,default=5 ,help="""The maximum total input sequence length after tokenization.""" ,)
parser.add_argument(
"""--num_beams""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) ,)
parser.add_argument(
"""--model_name_or_path""" ,type=_lowerCamelCase ,help="""Path to pretrained model or model identifier from huggingface.co/models.""" ,required=_lowerCamelCase ,)
parser.add_argument(
"""--config_name""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Pretrained config name or path if not the same as model_name""" ,)
parser.add_argument(
"""--device""" ,type=_lowerCamelCase ,default="""cpu""" ,help="""Device where the model will be run""" ,)
parser.add_argument("""--output_file_path""" ,type=_lowerCamelCase ,default=_lowerCamelCase ,help="""Where to store the final ONNX file.""" )
_lowerCAmelCase : Optional[Any] = parser.parse_args()
return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
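# Typical invocation of this script (the file name and flag values are
# illustrative; only flags defined in parse_args() above are used):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 \
#       --output_file_path BART.onnx
#
# The exported graph is then consumable via
# onnxruntime.InferenceSession("BART.onnx"), as in the validation step above.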
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields_ mirrors the Win32 CONSOLE_CURSOR_INFO struct layout
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    "Context manager to hide the terminal cursor"
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
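
# Minimal usage sketch (illustrative): hide the cursor while a long-running loop
# prints progress, restoring it afterwards even if the loop raises.
#
#   with hide():
#       for _ in range(10):
#           print(".", end="", flush=True)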
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
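# For the adjacency list above, the articulation points printed are 2, 3 and 5:
# removing any one of them disconnects the graph.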
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image To Text pipeline using a `AutoModelForVision2Seq`. This pipeline predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git models set `input_ids = None` in `preprocess` when no prompt is given; in batch mode
        # these group into a list of `None`, which `generate` cannot handle, so reset it here.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
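
# Minimal usage sketch (illustrative; the checkpoint name is an assumption, any
# vision-to-text checkpoint supported by the pipeline works):
#
#   from transformers import pipeline
#
#   captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#   print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
#   # -> [{'generated_text': '...'}]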
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
"""simple docstring"""
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
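
# Minimal usage sketch (illustrative; `raw_speech` stands for a 1-D float array of
# 16 kHz audio samples):
#
#   from transformers import WhisperProcessor
#
#   processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")
#   batch = processor(audio=raw_speech, sampling_rate=16000, text="hello world")
#   # batch["input_features"] feeds the model; batch["labels"] holds the token ids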
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros
def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst
if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
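# Note: gen_gaussian_kernel does not normalize the kernel to sum to 1, so the
# filtered image's overall brightness can drift; dividing `g` by `g.sum()` before
# returning it would make the filter brightness-preserving.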
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class DownloadTests(unittest.TestCase):
    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            _ = FlaxDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
            assert not any(f.endswith(".bin") for f in files)


@slow
@require_flax
class FlaxPipelineTests(unittest.TestCase):
    def test_dummy_all_tpus(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 4

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 64, 64, 3)
        if jax.device_count() == 8:
            assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3
            assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1

        images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))

        assert len(images_pil) == num_samples

    def test_stable_diffusion_v1_4(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_with_safety(self):
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16
        )

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1

    def test_stable_diffusion_v1_4_bfloat_16_ddim(self):
        scheduler = FlaxDDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            set_alpha_to_one=False,
            steps_offset=1,
        )

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            scheduler=scheduler,
            safety_checker=None,
        )
        scheduler_state = scheduler.create_state()

        params["scheduler"] = scheduler_state

        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = pipeline.prepare_inputs(prompt)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, num_samples)
        prompt_ids = shard(prompt_ids)

        images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images

        assert images.shape == (num_samples, 1, 512, 512, 3)
        if jax.device_count() == 8:
            assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 1e-3
            assert np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1

    def test_jax_memory_efficient_attention(self):
        prompt = (
            "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of"
            " field, close up, split lighting, cinematic"
        )

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples)

        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images.shape == (num_samples, 1, 512, 512, 3)

        slice = images[2, 0, 256, 10:17, 1]

        # With memory efficient attention
        pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="bf16",
            dtype=jnp.bfloat16,
            safety_checker=None,
            use_memory_efficient_attention=True,
        )

        params = replicate(params)
        prompt_ids = pipeline.prepare_inputs(prompt)
        prompt_ids = shard(prompt_ids)
        images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images
        assert images_eff.shape == (num_samples, 1, 512, 512, 3)

        # note: the slice is taken from the memory-efficient run so the two runs are compared
        slice_eff = images_eff[2, 0, 256, 10:17, 1]

        # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
        # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
        assert abs(slice_eff - slice).max() < 1e-2
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )

    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, mask_generator, examples):
        pass

    @require_tf
    @unittest.skip("Image segmentation not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.021},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
{"mask": {"hash": "e2d0b7a0b7", "shape": (480, 640)}, "scores": 0.9_967},
{"mask": {"hash": "453c7844bd", "shape": (480, 640)}, "scores": 0.993},
{"mask": {"hash": "3d44f2926d", "shape": (480, 640)}, "scores": 0.9_909},
{"mask": {"hash": "64033ddc3f", "shape": (480, 640)}, "scores": 0.9_879},
{"mask": {"hash": "801064ff79", "shape": (480, 640)}, "scores": 0.9_834},
{"mask": {"hash": "6172f276ef", "shape": (480, 640)}, "scores": 0.9_716},
{"mask": {"hash": "b49e60e084", "shape": (480, 640)}, "scores": 0.9_612},
{"mask": {"hash": "a811e775fd", "shape": (480, 640)}, "scores": 0.9_599},
{"mask": {"hash": "a6a8ebcf4b", "shape": (480, 640)}, "scores": 0.9_552},
{"mask": {"hash": "9d8257e080", "shape": (480, 640)}, "scores": 0.9_532},
{"mask": {"hash": "32de6454a8", "shape": (480, 640)}, "scores": 0.9_516},
{"mask": {"hash": "af3d4af2c8", "shape": (480, 640)}, "scores": 0.9_499},
{"mask": {"hash": "3c6db475fb", "shape": (480, 640)}, "scores": 0.9_483},
{"mask": {"hash": "c290813fb9", "shape": (480, 640)}, "scores": 0.9_464},
{"mask": {"hash": "b6f0b8f606", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "92ce16bfdf", "shape": (480, 640)}, "scores": 0.943},
{"mask": {"hash": "c749b25868", "shape": (480, 640)}, "scores": 0.9_408},
{"mask": {"hash": "efb6cab859", "shape": (480, 640)}, "scores": 0.9_335},
{"mask": {"hash": "1ff2eafb30", "shape": (480, 640)}, "scores": 0.9_326},
{"mask": {"hash": "788b798e24", "shape": (480, 640)}, "scores": 0.9_262},
{"mask": {"hash": "abea804f0e", "shape": (480, 640)}, "scores": 0.8_999},
{"mask": {"hash": "7b9e8ddb73", "shape": (480, 640)}, "scores": 0.8_986},
{"mask": {"hash": "cd24047c8a", "shape": (480, 640)}, "scores": 0.8_984},
{"mask": {"hash": "6943e6bcbd", "shape": (480, 640)}, "scores": 0.8_873},
{"mask": {"hash": "b5f47c9191", "shape": (480, 640)}, "scores": 0.8_871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )

        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]

        self.assertEqual(
            nested_simplify(new_output, decimals=4),
            [
{"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0_444},
{"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0_210},
{"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0_167},
{"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0_132},
{"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0_053},
] , )
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n        Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n        Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n        Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n        Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n        Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n        Kern, Robert and Larson, Eric and Carey, C J and\n        Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n        {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n        Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n        Harris, Charles R. and Archibald, Anne M. and\n        Ribeiro, Antonio H. and Pedregosa, Fabian and\n        {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n        Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
def solution(limit: int = 28123) -> int:
    """
    Sums every positive integer <= limit that cannot be written as the sum of
    two abundant numbers (numbers whose proper divisors sum to more than the
    number itself).
    """
    sum_divs = [1] * (limit + 1)

    for i in range(2, int(limit**0.5) + 1):
        sum_divs[i * i] += i
        for k in range(i + 1, limit // i + 1):
            sum_divs[k * i] += k + i

    abundants = set()
    res = 0

    for n in range(1, limit + 1):
        if sum_divs[n] > n:
            abundants.add(n)

        if not any((n - a in abundants) for a in abundants):
            res += n

    return res
if __name__ == "__main__":
print(solution())
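# Project Euler problem 23: with the default limit of 28123 the printed result
# should be 4179871.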
"""simple docstring"""
def solution(length: int = 50) -> int:
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
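    # Expected output for n=4, k=2: the six 2-element combinations
    # "1 2", "1 3", "1 4", "2 3", "2 4", "3 4", one per line.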
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)

        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""simple docstring"""
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=0.6 , lowercase_=None , ):
"""simple docstring"""
UpperCAmelCase_ : str = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : Optional[int] = image_size
UpperCAmelCase_ : Tuple = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Dict = is_training
UpperCAmelCase_ : Any = use_labels
UpperCAmelCase_ : str = hidden_size
UpperCAmelCase_ : Any = num_hidden_layers
UpperCAmelCase_ : int = num_attention_heads
UpperCAmelCase_ : Any = intermediate_size
UpperCAmelCase_ : Any = hidden_act
UpperCAmelCase_ : List[str] = hidden_dropout_prob
UpperCAmelCase_ : Tuple = attention_probs_dropout_prob
UpperCAmelCase_ : int = type_sequence_label_size
UpperCAmelCase_ : Tuple = initializer_range
UpperCAmelCase_ : Union[str, Any] = mask_ratio
UpperCAmelCase_ : Tuple = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
UpperCAmelCase_ : List[str] = (image_size // patch_size) ** 2
UpperCAmelCase_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : List[str] = None
if self.use_labels:
UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = ViTMAEModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase_ : List[Any] = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : int = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase_ : Tuple = model(a__ )
UpperCAmelCase_ : Dict = (self.image_size // self.patch_size) ** 2
UpperCAmelCase_ : Tuple = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : Dict = ViTMAEForPreTraining(a__ )
model.to(a__ )
model.eval()
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = model(a__ )
UpperCAmelCase_ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = self.prepare_config_and_inputs()
UpperCAmelCase_ : Dict = config_and_inputs
UpperCAmelCase_ : Dict = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class A_ (SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : str = {"feature-extraction": ViTMAEModel} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ : Dict = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : str = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = ViTMAEModelTester(self )
UpperCAmelCase_ : List[str] = ConfigTester(self , config_class=a__ , has_text_modality=a__ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class(a__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a__ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = model_class(a__ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Union[str, Any] = [*signature.parameters.keys()]
UpperCAmelCase_ : Any = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a__ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
# make masks reproducible
np.random.seed(2 )
UpperCAmelCase_ : Tuple = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
UpperCAmelCase_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
UpperCAmelCase_ : str = torch.from_numpy(a__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
UpperCAmelCase_ : Any = pt_noise
super().check_pt_tf_models(a__ , a__ , a__ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Union[str, Any] = model_class(a__ )
model.to(a__ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase_ : List[str] = model(**self._prepare_for_class(a__ , a__ ) )
UpperCAmelCase_ : Any = outputs[0].cpu().numpy()
UpperCAmelCase_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(a__ )
UpperCAmelCase_ : Union[str, Any] = model_class.from_pretrained(a__ )
model.to(a__ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
UpperCAmelCase_ : List[Any] = model(**self._prepare_for_class(a__ , a__ ) )
# Make sure we don't have nans
UpperCAmelCase_ : Dict = after_outputs[0].cpu().numpy()
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(a__ , 1E-5 )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = ViTMAEModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def __a ( ):
UpperCAmelCase_ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))

        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
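
# Hedged shape-bookkeeping sketch (not part of the original test): the `noise`
# tensor above has one entry per patch, so with the ViTMAEConfig defaults
# assumed here (image_size=224, patch_size=16) its length matches the
# 196-token logits sequence checked in the test.
if __name__ == "__main__":
    image_size, patch_size = 224, 16
    num_patches = (image_size // patch_size) ** 2
    print(num_patches)  # 196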
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each buckets' contents into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
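
# Usage sketch (assumes radix_sort above): the sort is performed in place and
# the same list object is also returned.
if __name__ == "__main__":
    example = [170, 45, 75, 90, 802, 24, 2, 66]
    print(radix_sort(example))  # [2, 24, 45, 66, 75, 90, 170, 802]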
from collections import Counter
from timeit import timeit


def can_string_be_rearranged_as_palindrome_counter(input_str: str = "") -> bool:
    return sum(c % 2 for c in Counter(input_str.replace(" ", "").lower()).values()) < 2


def can_string_be_rearranged_as_palindrome(input_str: str = "") -> bool:
    if len(input_str) == 0:
        return True
    lower_case_input_str = input_str.replace(" ", "").lower()
    # character_freq_dict: Stores the frequency of every character in the input string
    character_freq_dict: dict[str, int] = {}

    for character in lower_case_input_str:
        character_freq_dict[character] = character_freq_dict.get(character, 0) + 1

    odd_char = 0
    for character_count in character_freq_dict.values():
        if character_count % 2:
            odd_char += 1
    if odd_char > 1:
        return False
    return True


def benchmark(input_str: str = "") -> None:
    print("\nFor string = ", input_str, ":")
    print(
        "> can_string_be_rearranged_as_palindrome_counter()",
        "\tans =",
        can_string_be_rearranged_as_palindrome_counter(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome_counter(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )
    print(
        "> can_string_be_rearranged_as_palindrome()",
        "\tans =",
        can_string_be_rearranged_as_palindrome(input_str),
        "\ttime =",
        timeit(
            "z.can_string_be_rearranged_as_palindrome(z.check_str)",
            setup="import __main__ as z",
        ),
        "seconds",
    )


if __name__ == "__main__":
    check_str = input(
        "Enter string to determine if it can be rearranged as a palindrome or not: "
    ).strip()
    benchmark(check_str)
    status = can_string_be_rearranged_as_palindrome_counter(check_str)
    print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
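
# Quick demonstration sketch of the counting rule used above: a string can be
# rearranged into a palindrome iff at most one character has an odd frequency.
if __name__ == "__main__":
    for s in ("Momo", "Mother", "A man a plan a canal Panama"):
        print(s, "->", can_string_be_rearranged_as_palindrome_counter(s))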
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
parser.add_argument(
'--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
)
parser.add_argument(
'--entity_vocab_path',
default=None,
type=str,
help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
)
parser.add_argument(
'--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
)
UpperCAmelCase_ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
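
# Example invocation sketch (the script and path names below are illustrative
# placeholders, not files shipped with this module):
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path ./luke_base/pytorch_model.bin \
#       --metadata_path ./luke_base/metadata.json \
#       --entity_vocab_path ./luke_base/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-hf \
#       --model_size base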
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
    @slow
    def test_inference_no_head(self):
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
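
# Standalone sketch (torch only) of the multiple-choice reshaping used in
# create_and_check_deberta_for_multiple_choice above: (batch, seq) tensors are
# tiled to (batch, num_choices, seq) before the forward pass.
if __name__ == "__main__":
    ids = torch.arange(6).reshape(2, 3)
    print(ids.unsqueeze(1).expand(-1, 4, -1).shape)  # torch.Size([2, 4, 3])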
"""simple docstring"""
import numpy as np
import qiskit
def bb84(key_len: int = 8, seed: int | None = None) -> str:
    """
    Performs the BB84 protocol and returns the generated key as a string of
    ``key_len`` bits.
    """
    # Set up the random number generator.
    rng = np.random.default_rng(seed=seed)

    # Roughly 25% of the qubits will contribute to the key.
    # So we take more than we need.
    num_qubits = 6 * key_len
    # Measurement basis for Alice's qubits.
    alice_basis = rng.integers(2, size=num_qubits)
    # The set of states Alice will prepare.
    alice_state = rng.integers(2, size=num_qubits)
    # Measurement basis for Bob's qubits.
    bob_basis = rng.integers(2, size=num_qubits)

    # Quantum Circuit to simulate BB84
    bb84_circ = qiskit.QuantumCircuit(num_qubits, name="BB84")

    # Alice prepares her qubits according to rules above.
    for index, _ in enumerate(alice_basis):
        if alice_state[index] == 1:
            bb84_circ.x(index)
        if alice_basis[index] == 1:
            bb84_circ.h(index)
    bb84_circ.barrier()

    # Bob measures the received qubits according to rules above.
    for index, _ in enumerate(bob_basis):
        if bob_basis[index] == 1:
            bb84_circ.h(index)

    bb84_circ.barrier()
    bb84_circ.measure_all()

    # Simulate the quantum circuit.
    sim = qiskit.Aer.get_backend("aer_simulator")
    # We only need to run one shot because the key is unique.
    # Multiple shots will produce the same key.
    job = qiskit.execute(bb84_circ, sim, shots=1, seed_simulator=seed)
    # Returns the result of measurement.
    result = job.result().get_counts(bb84_circ).most_frequent()

    # Extracting the generated key from the simulation results.
    # Only keep measurement results where Alice and Bob chose the same basis.
    gen_key = "".join(
        result_bit
        for alice_basis_bit, bob_basis_bit, result_bit in zip(alice_basis, bob_basis, result)
        if alice_basis_bit == bob_basis_bit
    )

    # Get final key. Pad with 0 if too short, otherwise truncate.
    key = gen_key[:key_len] if len(gen_key) >= key_len else gen_key.ljust(key_len, "0")
    return key


if __name__ == "__main__":
    print(f"The generated key is : {bb84(8, seed=0)}")
    from doctest import testmod

    testmod()
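
# Standalone sketch of the basis-sifting step above (no qiskit needed): a bit
# survives only when Alice and Bob used the same basis for that qubit.
def _sift_example(alice_basis, bob_basis, bits):
    return "".join(b for ab, bb, b in zip(alice_basis, bob_basis, bits) if ab == bb)


if __name__ == "__main__":
    assert _sift_example([0, 1, 1, 0], [0, 0, 1, 1], "1011") == "11"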
'''simple docstring'''
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = (
    subprocess.check_output(f"git diff --diff-filter=d --name-only {fork_point_sha}".split()).decode("utf-8").split()
)
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
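
# Usage sketch (the script name below is illustrative): run from the repository
# root with the directories to filter on as arguments, e.g.
#   python get_modified_files.py src tests examples
# which prints the modified .py files under those directories since the fork point.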
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85):
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)

        self._duplicate_clusters = defaultdict(set)

    def add(self, code_key, min_hash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return

        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)

    def get_duplicate_clusters(self):
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters

    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash


def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash,
            ThreadedIterator(dataset_iterator, max_queue_size=10000),
            chunksize=100,
        ):
            if data is not None:
                yield data


def make_duplicate_clusters(dataset_iterator, jaccard_threshold: float):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)

    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)

    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()


def jaccard_similarity(code1: str, code2: str) -> float:
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)


_shared_dataset = None


def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes


def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list


def deduplicate_dataset(dataset, jaccard_threshold: float = 0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)

    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]

    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")

    return ds_filter, duplicate_clusters
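
# Minimal sketch (uses only the helpers above): jaccard_similarity compares the
# *sets* of alphanumeric tokens, so reordering tokens does not lower the score.
if __name__ == "__main__":
    code_a = "def add(x, y):\n    return x + y"
    code_b = "def add(x, y):\n    return y + x"
    print(jaccard_similarity(code_a, code_b))  # 1.0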
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    # NOTE: class and attribute names were garbled in the original and are
    # reconstructed to match the diffusers pipeline-test conventions.
    pipeline_class = DanceDiffusionPipeline
    params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "callback",
        "latents",
        "callback_steps",
        "output_type",
        "num_images_per_prompt",
    }
    batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
    test_attention_slicing = False
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDModel(
            block_out_channels=(32, 32, 64),
            extra_in_channels=16,
            sample_size=512,
            sample_rate=16000,
            in_channels=2,
            out_channels=2,
            flip_sin_to_cos=True,
            use_timestep_embedding=False,
            time_embedding_type="fourier",
            mid_block_type="UNetMidBlock1D",
            down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
            up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        )
        scheduler = IPNDMScheduler()
        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "batch_size": 1,
            "generator": generator,
            "num_inference_steps": 4,
        }
        return inputs
    def test_dance_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = DanceDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, components["unet"].sample_size)
        expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dance_diffusion(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2

    def test_dance_diffusion_fp16(self):
        device = torch_device

        pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096)
        audio = output.audios

        audio_slice = audio[0, -3:, -3:]

        assert audio.shape == (1, 2, pipe.unet.sample_size)
        expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341])
        assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
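
# Sketch of the device-aware seeding in get_dummy_inputs above: MPS does not
# accept a device-bound torch.Generator, hence the global manual_seed fallback.
if __name__ == "__main__":
    generator = torch.Generator(device="cpu").manual_seed(0)
    print(torch.randn(2, generator=generator))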
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
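
# Usage sketch: with the defaults above, the derived hidden_size is
# embed_dim * 2 ** (len(depths) - 1) = 96 * 8 = 768.
if __name__ == "__main__":
    config = Swinv2Config()
    print(config.hidden_size)  # 768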
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
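
# Usage sketch (hedged): the exported ONNX axes depend on the task; a
# multiple-choice model gains an extra "choice" dimension.
if __name__ == "__main__":
    onnx_config = BertOnnxConfig(BertConfig(), task="multiple-choice")
    print(onnx_config.inputs)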
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
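
# Standalone sketch of the .npz round-trip used in test_speaker_embeddings:
# np.savez stores each keyword array under its name and np.load restores it.
if __name__ == "__main__":
    import io

    buffer = io.BytesIO()
    np.savez(buffer, semantic_prompt=np.ones(3))
    buffer.seek(0)
    print(np.load(buffer)["semantic_prompt"])  # [1. 1. 1.]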
"""simple docstring"""
def solution(n: int = 1000000) -> int:
    """
    Returns the starting number below `n` that produces the longest Collatz
    chain, memoising chain lengths that were already computed.
    """
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}
    for input1 in range(2, n):
        counter = 0
        number = input1
        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1
        if input1 not in counters:
            counters[input1] = counter
        if counter > pre_counter:
            largest_number = input1
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
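
# Sanity check for the helper above (a minimal sketch; the small case is verified by
# hand, the large one against the well-known Project Euler 14 answer):
if __name__ == "__main__":
    assert solution(10) == 9  # below 10, the chain starting at 9 is the longest (20 terms)
    assert solution(1_000_000) == 837_799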
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Entropy of softmax(x), row-wise: log(sum exp(x)) - sum(x * exp(x)) / sum(exp(x))."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
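
# Quick numerical check of the helper above against an explicit softmax entropy
# (a minimal sketch, not part of the original module):
if __name__ == "__main__":
    logits = torch.tensor([[1.0, 2.0, 3.0], [0.5, 0.5, 0.5]])
    probs = torch.softmax(logits, dim=1)
    reference = -(probs * probs.log()).sum(dim=1)
    assert torch.allclose(entropy(logits), reference, atol=1e-6)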
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)

        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[1:]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class BertHighway(nn.Module):
    """An early-exit "highway": pool one intermediate layer's output and classify it."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits, pooled_output
@add_start_docstrings(
    "Bert Model (with early exiting - DeeBERT) with a classifier on top,\n also takes care of multi-layer training. ",
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
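
# Minimal usage sketch of early exiting (hypothetical configuration, not from the
# original file; an exit fires when a highway's entropy drops below its per-layer
# threshold, and the returned tuple then reports which layer exited):
#
#     model = DeeBertForSequenceClassification.from_pretrained("bert-base-uncased")
#     model.bert.encoder.set_early_exit_entropy(0.6)  # one threshold for every layer
#     model.eval()
#     outputs = model(input_ids)
#     # outputs[-1] is the exit layer; outputs[-2] holds (original_entropy, highway_entropy)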
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'vinvino02/glpn-kitti': 'https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json',
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}


class GLPNConfig(PretrainedConfig):
    model_type = "glpn"

    def __init__(self, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[32, 64, 160, 256], patch_sizes=[7, 3, 3, 3], strides=[4, 2, 2, 2], num_attention_heads=[1, 2, 5, 8], mlp_ratios=[4, 4, 4, 4], hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, drop_path_rate=0.1, layer_norm_eps=1e-6, decoder_hidden_size=64, max_depth=10, head_in_index=-1, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.max_depth = max_depth
        self.head_in_index = head_in_index
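
# Minimal usage sketch (the overridden values are arbitrary, for illustration only):
if __name__ == "__main__":
    config = GLPNConfig(decoder_hidden_size=32, max_depth=8)
    print(config.model_type, config.decoder_hidden_size, config.max_depth)  # glpn 32 8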
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only fsspec interface to the files of a Hugging Face dataset repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
from __future__ import annotations

from math import pow, sqrt


def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """
    Solve for the one missing quantity in Z**2 = R**2 + X**2.
    Exactly one of the three arguments must be 0.

    >>> electrical_impedance(3, 4, 0)
    {'impedance': 5.0}
    >>> electrical_impedance(0, 4, 5)
    {'resistance': 3.0}
    """
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
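
# Worked example for the solver above, using a 3-4-5 triangle (Z**2 = R**2 + X**2):
if __name__ == "__main__":
    assert electrical_impedance(3, 4, 0) == {"impedance": 5.0}
    assert electrical_impedance(0, 4, 5) == {"resistance": 3.0}
    assert electrical_impedance(3, 0, 5) == {"reactance": 4.0}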
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[Any] = KandinskyImgaImgPipeline
_UpperCamelCase : Optional[Any] = ["prompt", "image_embeds", "negative_image_embeds", "image"]
_UpperCamelCase : List[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
]
_UpperCamelCase : Dict = [
"generator",
"height",
"width",
"strength",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
_UpperCamelCase : Union[str, Any] = False
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return 32
@property
def __A ( self ):
return self.time_input_dim
@property
def __A ( self ):
return self.time_input_dim * 4
@property
def __A ( self ):
return 100
@property
def __A ( self ):
_lowerCAmelCase : Optional[Any] = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
return tokenizer
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
_lowerCAmelCase : int = MultilingualCLIP(a__ )
_lowerCAmelCase : Union[str, Any] = text_encoder.eval()
return text_encoder
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**a__ )
return model
@property
def __A ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __A ( self ):
torch.manual_seed(0 )
_lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs )
return model
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.dummy_text_encoder
_lowerCAmelCase : List[Any] = self.dummy_tokenizer
_lowerCAmelCase : int = self.dummy_unet
_lowerCAmelCase : Dict = self.dummy_movq
_lowerCAmelCase : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0_0_8_5,
"""beta_end""": 0.0_1_2,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase : Optional[Any] = DDIMScheduler(**a__ )
_lowerCAmelCase : List[Any] = {
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def __A ( self , a__ , a__=0 ):
_lowerCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(a__ )
# create init_image
_lowerCAmelCase : List[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ )
_lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(a__ ) ).convert("""RGB""" ).resize((256, 256) )
if str(a__ ).startswith("""mps""" ):
_lowerCAmelCase : List[Any] = torch.manual_seed(a__ )
else:
_lowerCAmelCase : Tuple = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCAmelCase : Optional[Any] = {
"""prompt""": """horse""",
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def __A ( self ):
_lowerCAmelCase : Any = """cpu"""
_lowerCAmelCase : int = self.get_dummy_components()
_lowerCAmelCase : int = self.pipeline_class(**a__ )
_lowerCAmelCase : Optional[int] = pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Tuple = pipe(**self.get_dummy_inputs(a__ ) )
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Tuple = pipe(
**self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0]
_lowerCAmelCase : Dict = image[0, -3:, -3:, -1]
_lowerCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def __A ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __A ( self ):
_lowerCAmelCase : Dict = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinsky/kandinsky_img2img_frog.npy""" )
_lowerCAmelCase : List[str] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase : Union[str, Any] = """A red cartoon frog, 4k"""
_lowerCAmelCase : int = KandinskyPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(a__ )
_lowerCAmelCase : Tuple = KandinskyImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa )
_lowerCAmelCase : Any = pipeline.to(a__ )
pipeline.set_progress_bar_config(disable=a__ )
_lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase : Dict = pipe_prior(
a__ , generator=a__ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase : Union[str, Any] = pipeline(
a__ , image=a__ , image_embeds=a__ , negative_image_embeds=a__ , generator=a__ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(a__ , a__ )
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__UpperCAmelCase = ""
__UpperCAmelCase = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self : int ,snake_case : Optional[int] = None ,snake_case : Optional[Any] = None ,**snake_case : Dict ,):
super().__init__(self ,**a__ )
SCREAMING_SNAKE_CASE =repo_info
SCREAMING_SNAKE_CASE =token
SCREAMING_SNAKE_CASE =None
def _lowerCAmelCase ( self : List[Any] ):
if self.dir_cache is None:
SCREAMING_SNAKE_CASE ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE ={
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(a__ ): {'name': str(a__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _lowerCAmelCase ( self : int ,snake_case : int ,snake_case : str = "rb" ,**snake_case : Optional[Any] ,):
if not isinstance(self.repo_info ,a__ ):
raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' )
SCREAMING_SNAKE_CASE =hf_hub_url(self.repo_info.id ,a__ ,revision=self.repo_info.sha )
return fsspec.open(
a__ ,mode=a__ ,headers=get_authentication_headers_for_url(a__ ,use_auth_token=self.token ) ,client_kwargs={'trust_env': True} ,).open()
def _lowerCAmelCase ( self : Optional[int] ,snake_case : Optional[Any] ,**snake_case : List[Any] ):
self._get_dirs()
SCREAMING_SNAKE_CASE =self._strip_protocol(a__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(a__ )
def _lowerCAmelCase ( self : List[str] ,snake_case : List[str] ,snake_case : int=False ,**snake_case : Dict ):
self._get_dirs()
SCREAMING_SNAKE_CASE =PurePosixPath(path.strip('/' ) )
SCREAMING_SNAKE_CASE ={}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE =PurePosixPath(p.strip('/' ) )
SCREAMING_SNAKE_CASE =p.parent
if root == path:
SCREAMING_SNAKE_CASE =f
SCREAMING_SNAKE_CASE =list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
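
# Minimal usage sketch for the filesystem class above (the repository contents are
# hypothetical; ``HfFileSystem`` is the name used by the first, de-obfuscated copy
# of this class earlier in the file):
#
#     fs = HfFileSystem(repo_info=dataset_info, token=hf_token)
#     fs.ls("data")                      # -> ["data/train.csv", ...]
#     with fs.open("data/train.csv") as f:
#         header = f.readline()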
from math import ceil


def assert_device_map(device_map: dict, num_blocks: int) -> None:
    """Validate that `device_map` assigns every attention block to exactly one device."""
    blocks = list(range(0, num_blocks))
    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks))
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks))
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks))


def get_device_map(n_layers: int, devices: list) -> dict:
    """Evenly split `n_layers` layer indices across `devices`."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]
    return dict(zip(devices, layers_list))
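
# Minimal usage sketch: split 8 layers across two devices, then validate the map.
if __name__ == "__main__":
    device_map = get_device_map(n_layers=8, devices=[0, 1])
    assert device_map == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
    assert_device_map(device_map, num_blocks=8)  # raises on duplicate/missing blocks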
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
_a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
_a : List[str] = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
from ...configuration_utils import PretrainedConfig


class BertGenerationConfig(PretrainedConfig):
    '''Configuration class for BertGeneration encoder/decoder models.'''

    model_type = "bert-generation"

    def __init__(self, vocab_size=5_0358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1E-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
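
# Minimal usage sketch: defaults mirror the assignments above; one field overridden.
if __name__ == "__main__":
    config = BertGenerationConfig(num_hidden_layers=6)
    print(config.model_type, config.hidden_size, config.num_hidden_layers)  # bert-generation 1024 6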
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_a : Dict = logging.get_logger(__name__)
@add_end_docstrings(SCREAMING_SNAKE_CASE_ )
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , *a__ , **a__ ):
super().__init__(*a__ , **a__ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == """tf""" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __A ( self , a__=None , a__=None , a__=None ):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Union[str, Any] = {}
if prompt is not None:
_lowerCAmelCase : List[Any] = prompt
if generate_kwargs is not None:
_lowerCAmelCase : List[str] = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
_lowerCAmelCase : str = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"""'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"""
""" please use only one""" )
_lowerCAmelCase : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , a__ , **a__ ):
return super().__call__(a__ , **a__ )
def __A ( self , a__ , a__=None ):
_lowerCAmelCase : Tuple = load_image(a__ )
if prompt is not None:
if not isinstance(a__ , a__ ):
raise ValueError(
F"Received an invalid text input, got - {type(a__ )} - but expected a single string. "
"""Note also that one single text can be provided for conditional image to text generation.""" )
_lowerCAmelCase : Optional[int] = self.model.config.model_type
if model_type == "git":
_lowerCAmelCase : Optional[Any] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : List[str] = self.tokenizer(text=a__ , add_special_tokens=a__ ).input_ids
_lowerCAmelCase : Union[str, Any] = [self.tokenizer.cls_token_id] + input_ids
_lowerCAmelCase : Dict = torch.tensor(a__ ).unsqueeze(0 )
model_inputs.update({"""input_ids""": input_ids} )
elif model_type == "pix2struct":
_lowerCAmelCase : Tuple = self.image_processor(images=a__ , header_text=a__ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
_lowerCAmelCase : Optional[int] = self.image_processor(images=a__ , return_tensors=self.framework )
_lowerCAmelCase : Optional[int] = self.tokenizer(a__ , return_tensors=self.framework )
model_inputs.update(a__ )
else:
raise ValueError(F"Model type {model_type} does not support conditional text generation" )
else:
_lowerCAmelCase : Any = self.image_processor(images=a__ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
_lowerCAmelCase : Union[str, Any] = None
return model_inputs
def __A ( self , a__ , a__=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["""input_ids"""] , a__ )
and all(x is None for x in model_inputs["""input_ids"""] )
):
_lowerCAmelCase : Optional[int] = None
if generate_kwargs is None:
_lowerCAmelCase : List[str] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
_lowerCAmelCase : Tuple = model_inputs.pop(self.model.main_input_name )
_lowerCAmelCase : Union[str, Any] = self.model.generate(a__ , **a__ , **a__ )
return model_outputs
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = []
for output_ids in model_outputs:
_lowerCAmelCase : Any = {
"""generated_text""": self.tokenizer.decode(
a__ , skip_special_tokens=a__ , )
}
records.append(a__ )
return records
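
# Minimal usage sketch for the pipeline above (the model name and output text are
# illustrative, not mandated by this file):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png")
#     # -> [{"generated_text": "..."}], one dict per generated caption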
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__=13, __magic_name__=32, __magic_name__=3, __magic_name__=4, __magic_name__=[10, 20, 30, 40], __magic_name__=[2, 2, 3, 2], __magic_name__=True, __magic_name__=True, __magic_name__=37, __magic_name__="gelu", __magic_name__=10, __magic_name__=0.02, __magic_name__=["stage2", "stage3", "stage4"], __magic_name__=[2, 3, 4], __magic_name__=None, ) -> str:
"""simple docstring"""
UpperCamelCase__ : int = parent
UpperCamelCase__ : str = batch_size
UpperCamelCase__ : Dict = image_size
UpperCamelCase__ : List[Any] = num_channels
UpperCamelCase__ : Any = num_stages
UpperCamelCase__ : Tuple = hidden_sizes
UpperCamelCase__ : Dict = depths
UpperCamelCase__ : Any = is_training
UpperCamelCase__ : List[str] = use_labels
UpperCamelCase__ : List[Any] = intermediate_size
UpperCamelCase__ : Tuple = hidden_act
UpperCamelCase__ : Optional[int] = num_labels
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : List[Any] = out_features
UpperCamelCase__ : int = out_indices
UpperCamelCase__ : List[Any] = scope
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Union[str, Any] = None
if self.use_labels:
UpperCamelCase__ : int = ids_tensor([self.batch_size], self.num_labels )
UpperCamelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
return ConvNextConfig(
num_channels=self.num_channels, hidden_sizes=self.hidden_sizes, depths=self.depths, num_stages=self.num_stages, hidden_act=self.hidden_act, is_decoder=a__, initializer_range=self.initializer_range, out_features=self.out_features, out_indices=self.out_indices, num_labels=self.num_labels, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : Dict = ConvNextModel(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ : Tuple = model(a__ )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32), )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ConvNextForImageClassification(a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ : List[Any] = model(a__, labels=a__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : List[str] = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ : str = model(a__ )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ), len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ), len(config.out_features ) )
self.parent.assertListEqual(model.channels, config.hidden_sizes[1:] )
# verify backbone works with out_features=None
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Optional[int] = ConvNextBackbone(config=a__ )
model.to(a__ )
model.eval()
UpperCamelCase__ : int = model(a__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ), 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ), [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ), 1 )
self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]] )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
a : Tuple = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
a : List[Any] = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
a : Optional[int] = True
a : List[str] = False
a : Dict = False
a : str = False
a : Dict = False
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ConvNextModelTester(self )
UpperCamelCase__ : Optional[Any] = ConfigTester(self, config_class=a__, has_text_modality=a__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''' )
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''' )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Union[str, Any] = model_class(a__ )
UpperCamelCase__ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : int = [*signature.parameters.keys()]
UpperCamelCase__ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], a__ )
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*a__ )
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ):
UpperCamelCase__ : Dict = model_class(a__ )
model.to(a__ )
model.eval()
with torch.no_grad():
UpperCamelCase__ : List[str] = model(**self._prepare_for_class(a__, a__ ) )
UpperCamelCase__ : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : str = self.model_tester.num_stages
self.assertEqual(len(a__ ), expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [self.model_tester.image_size // 4, self.model_tester.image_size // 4], )
UpperCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : Optional[int] = True
check_hidden_states_output(a__, a__, a__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : Optional[Any] = True
check_hidden_states_output(a__, a__, a__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a__ )
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : int = ConvNextModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''' ).to(a__ )
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : int = prepare_img()
UpperCamelCase__ : Union[str, Any] = image_processor(images=a__, return_tensors='''pt''' ).to(a__ )
# forward pass
with torch.no_grad():
UpperCamelCase__ : int = model(**a__ )
# verify the logits
UpperCamelCase__ : Union[str, Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, a__ )
UpperCamelCase__ : Tuple = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(a__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], a__, atol=1E-4 ) )
@require_torch
class lowercase__ ( unittest.TestCase , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
a : List[str] = (ConvNextBackbone,) if is_torch_available() else ()
a : int = ConvNextConfig
a : Dict = False
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
UpperCamelCase__ : str = ConvNextModelTester(self )
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
_a : Dict = datasets.utils.logging.get_logger(__name__)
@dataclass
class __A ( datasets.BuilderConfig ):
_UpperCamelCase : int = 10_000
_UpperCamelCase : Optional[List[str]] = None
_UpperCamelCase : Optional[datasets.Features] = None
class __A ( datasets.ArrowBasedBuilder ):
_UpperCamelCase : List[str] = ParquetConfig
def __A ( self ):
return datasets.DatasetInfo(features=self.config.features )
def __A ( self , a__ ):
if not self.config.data_files:
raise ValueError(F"At least one data file must be specified, but got data_files={self.config.data_files}" )
_lowerCAmelCase : Optional[Any] = dl_manager.download_and_extract(self.config.data_files )
if isinstance(a__ , (str, list, tuple) ):
_lowerCAmelCase : Any = data_files
if isinstance(a__ , a__ ):
_lowerCAmelCase : Tuple = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Any = [dl_manager.iter_files(a__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )]
_lowerCAmelCase : Optional[Any] = []
for split_name, files in data_files.items():
if isinstance(a__ , a__ ):
_lowerCAmelCase : Dict = [files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
_lowerCAmelCase : Tuple = [dl_manager.iter_files(a__ ) for file in files]
            # Infer features if they are stored in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(a__ ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Optional[Any] = datasets.Features.from_arrow_schema(pq.read_schema(a__ ) )
break
splits.append(datasets.SplitGenerator(name=a__ , gen_kwargs={"""files""": files} ) )
return splits
def __A ( self , a__ ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
_lowerCAmelCase : Optional[int] = table_cast(a__ , self.info.features.arrow_schema )
return pa_table
def __A ( self , a__ ):
_lowerCAmelCase : Optional[int] = self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'" )
for file_idx, file in enumerate(itertools.chain.from_iterable(a__ ) ):
with open(a__ , """rb""" ) as f:
_lowerCAmelCase : Tuple = pq.ParquetFile(a__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
_lowerCAmelCase : Any = pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"{file_idx}_{batch_idx}", self._cast_table(a__ )
except ValueError as e:
logger.error(F"Failed to read file '{file}' with error {type(a__ )}: {e}" )
raise
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = 'src/transformers'
PATH_TO_TASK_GUIDES = 'docs/source/en/tasks'
def _find_text_in_file(filename, start_prompt, end_prompt):
    """Return the text between `start_prompt` and `end_prompt`, plus its line span."""
    with open(filename, '''r''', encoding='''utf-8''', newline='''\n''') as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """Return a comma-separated markdown list of the models supported by `task_guide`."""
    model_maping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_maping_names or code in special_model_types)
    }
    return ", ".join([f'[{name}](../model_doc/{code})' for code, name in model_names.items()]) + "\n"
def check_model_list_for_task(task_guide, overwrite=False):
    """Sync the auto-generated model list in a task guide; raise if it is stale and `overwrite` is False."""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide), start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''', end_prompt='''<!--End of the generated tip-->''', )
    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), '''w''', encoding='''utf-8''', newline='''\n''') as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f'The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'
                ''' to fix this.''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
    args = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
_a : Tuple = logging.getLogger(__name__)
_a : Any = {'facebook/bart-base': BartForConditionalGeneration}
_a : List[str] = {'facebook/bart-base': BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""")
    parser.add_argument(
        """--validation_file""", type=str, default=None, help="""A csv or a json file containing the validation data."""
    )
    parser.add_argument(
        """--max_length""", type=int, default=5, help="""The maximum total input sequence length after tokenization.""",
    )
    parser.add_argument(
        """--num_beams""",
        type=int,
        default=None,
        help=(
            """Number of beams to use for evaluation. This argument will be """
            """passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
        ),
    )
    parser.add_argument(
        """--model_name_or_path""",
        type=str,
        help="""Path to pretrained model or model identifier from huggingface.co/models.""",
        required=True,
    )
    parser.add_argument(
        """--config_name""", type=str, default=None, help="""Pretrained config name or path if not the same as model_name""",
    )
    parser.add_argument("""--device""", type=str, default="""cpu""", help="""Device where the model will be run""")
    parser.add_argument("""--output_file_path""", type=str, default=None, help="""Where to store the final ONNX file.""")
    args = parser.parse_args()
    return args
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Union[str, Any]="cpu" ) -> str:
_lowerCAmelCase : List[str] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : str = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any] ,_lowerCamelCase : List[str] ,_lowerCamelCase : int ,_lowerCamelCase : List[Any] ,_lowerCamelCase : List[str] ) -> Tuple:
model.eval()
_lowerCAmelCase : str = None
_lowerCAmelCase : int = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
_lowerCAmelCase : List[Any] = """My friends are cool but they eat too many carbs."""
_lowerCAmelCase : Union[str, Any] = tokenizer([ARTICLE_TO_SUMMARIZE] ,max_length=1024 ,return_tensors="""pt""" ).to(model.device )
_lowerCAmelCase : Any = model.generate(
inputs["""input_ids"""] ,attention_mask=inputs["""attention_mask"""] ,num_beams=_lowerCamelCase ,max_length=_lowerCamelCase ,early_stopping=_lowerCamelCase ,decoder_start_token_id=model.config.decoder_start_token_id ,)
torch.onnx.export(
_lowerCamelCase ,(
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) ,_lowerCamelCase ,opset_version=14 ,input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] ,output_names=["""output_ids"""] ,dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} ,example_outputs=_lowerCamelCase ,)
logger.info("""Model exported to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : List[str] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_lowerCamelCase ) )
_lowerCAmelCase : str = onnxruntime.InferenceSession(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = ort_sess.run(
_lowerCamelCase ,{
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_lowerCamelCase ),
"""max_length""": np.array(_lowerCamelCase ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} ,)
np.testing.assert_allclose(summary_ids.cpu().numpy() ,ort_out[0] ,rtol=1e-3 ,atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Any = parse_args()
_lowerCAmelCase : List[Any] = 5
_lowerCAmelCase : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" ,datefmt="""%m/%d/%Y %H:%M:%S""" ,level=logging.INFO ,)
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = torch.device(args.device )
_lowerCAmelCase , _lowerCAmelCase : List[str] = load_model_tokenizer(args.model_name_or_path ,_lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_lowerCamelCase )
if args.max_length:
_lowerCAmelCase : Dict = args.max_length
if args.num_beams:
_lowerCAmelCase : Dict = args.num_beams
if args.output_file_path:
_lowerCAmelCase : Any = args.output_file_path
else:
_lowerCAmelCase : Union[str, Any] = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if __name__ == "__main__":
main()
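# Example invocation (illustrative -- the script filename and output path are
# placeholders; the flags match parse_args above):
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --num_beams 4 \
#       --max_length 5 \
#       --output_file_path bart_beam_search.onnx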
'''simple docstring'''
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if ".fc2." in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if ".fc1." in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
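# Sanity-check sketch (illustrative): inspect the sharded index written by
# shard_on_the_fly. WEIGHTS_INDEX_NAME resolves to "pytorch_model.bin.index.json"
# in transformers; the dump folder path below is whatever was passed above.
#
#   import json, os
#   with open(os.path.join(args.pytorch_dump_folder_path, "pytorch_model.bin.index.json")) as f:
#       index = json.load(f)
#   print(index["metadata"]["total_size"])          # total bytes across shards
#   print(len(set(index["weight_map"].values())))   # number of shard files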
"""simple docstring"""
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # The root of a DFS tree is an articulation point iff it has more than one outgoing edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
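# Cross-check (illustrative): a vertex is an articulation point iff deleting it
# increases the number of connected components. For the sample graph above that
# brute-force definition gives vertices 2, 3 and 5, which is what compute_ap prints.
def _count_components(graph, removed=None):
    seen = set()
    components = 0
    for start in graph:
        if start == removed or start in seen:
            continue
        components += 1
        stack = [start]
        while stack:
            node = stack.pop()
            if node == removed or node in seen:
                continue
            seen.add(node)
            stack.extend(n for n in graph[node] if n != removed)
    return components


if __name__ == "__main__":
    base = _count_components(data)
    assert [v for v in data if _count_components(data, v) > base] == [2, 3, 5]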
import itertools
import math
def is_prime(number: int) -> bool:
    """Determine primality by trial division over candidates of the form 6k +/- 1."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield the primes in increasing order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    """Return the nth prime number (Project Euler problem 7)."""
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F'''{solution() = }''')
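# Quick self-check (illustrative): the 6k +/- 1 trial division above should agree
# with a naive primality test on small inputs, and the 6th prime is 13.
def _naive_is_prime(number: int) -> bool:
    return number > 1 and all(number % d != 0 for d in range(2, number))


if __name__ == "__main__":
    assert all(is_prime(n) == _naive_is_prime(n) for n in range(-5, 200))
    assert solution(6) == 13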
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]

        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
"""simple docstring"""
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()

CURSOR_TO_CHAR = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()


def writeColor(content, color, end=""):
    forceWrite(f"\u001b[{color}m{content}\u001b[0m", end)


def reset_cursor():
    forceWrite("\r")


def move_cursor(num_lines: int, direction: str):
    forceWrite(f"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}")


def clear_line():
    forceWrite(" " * TERMINAL_WIDTH)
    reset_cursor()


def linebreak():
    reset_cursor()
    forceWrite("-" * TERMINAL_WIDTH)
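# Minimal usage sketch (illustrative): draw a rule, print a highlighted entry, then
# jump back up and clear it. "32" is the ANSI color code for green; any code works.
if __name__ == "__main__":
    linebreak()
    forceWrite("\n")
    writeColor("selected option", 32, end="\n")
    move_cursor(1, Direction.UP.name)
    clear_line()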
"""simple docstring"""
from itertools import product
from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
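# Quick property check (illustrative): for an odd k_size the kernel built by
# gen_gaussian_kernel is symmetric and peaks at the center. Note it is not
# normalized in general, so dividing by kernel.sum() would turn the convolution
# into a true weighted average instead of slightly scaling the brightness.
if __name__ == "__main__":
    kernel = gen_gaussian_kernel(5, sigma=1)
    assert kernel.shape == (5, 5)
    assert abs(kernel[2, 2] - kernel.max()) < 1e-12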
"""simple docstring"""
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
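# Worked examples (illustrative):
#   is_isogram("Uncopyrightable")  -> True   (fifteen distinct letters)
#   is_isogram("allowance")        -> False  ("a" and "l" both repeat)
#   is_isogram("isogram 1")        -> raises ValueError (space and digit)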
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
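# Usage sketch (illustrative; the ids shown are placeholders, not asserted values):
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   tokenizer.build_inputs_with_special_tokens([7592], [2088])
#   # -> [CLS] 7592 [SEP] 2088 [SEP]
#   tokenizer.create_token_type_ids_from_sequences([7592], [2088])
#   # -> [0, 0, 0, 1, 1]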
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'vinai/bartpho-syllable': 1024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")

        return out_vocab_file, out_monolingual_vocab_file
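# Usage sketch (illustrative): load the syllable-level checkpoint referenced in
# PRETRAINED_VOCAB_FILES_MAP above and round-trip a Vietnamese sentence.
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]
#   print(tokenizer.decode(ids))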
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n          Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n          Kern, Robert and Larson, Eric and Carey, C J and\n          Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n          Harris, Charles R. and Archibald, Anne M. and\n          Ribeiro, Antonio H. and Pedregosa, Fabian and\n          {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n          Computing in Python}},\njournal = {Nature Methods},\nyear    = {2020},\nvolume  = {17},\npages   = {261--272},\nadsurl  = {https://rdcu.be/b08Wh},\ndoi     = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "noise_sampler_seed": 0,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47821044921875) < 1e-2
            assert abs(result_mean.item() - 0.2178705964565277) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59352111816406) < 1e-2
            assert abs(result_mean.item() - 0.22342906892299652) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77149200439453) < 1e-2
            assert abs(result_mean.item() - 0.16226289014816284) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1663360595703) < 1e-2
            assert abs(result_mean.item() - 0.16688326001167297) < 1e-3
        else:
            assert abs(result_sum.item() - 119.8487548828125) < 1e-2
            assert abs(result_mean.item() - 0.1560530662536621) < 1e-3

    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46957397460938) < 1e-2
            assert abs(result_mean.item() - 0.21805934607982635) < 1e-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59353637695312) < 1e-2
            assert abs(result_mean.item() - 0.22342908382415771) < 1e-3
        else:
            assert abs(result_sum.item() - 162.52383422851562) < 1e-2
            assert abs(result_mean.item() - 0.211619570851326) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66974135742188) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63653564453125) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
        else:
            assert abs(result_sum.item() - 170.3135223388672) < 1e-2
            assert abs(result_mean.item() - 0.23003872730981811) < 1e-2
"""simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be filled with red blocks of
    minimum length 3, any two blocks being separated by at least one black unit
    (Project Euler problem 114)."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]
if __name__ == "__main__":
print(F"""{solution() = }""")
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_torch
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [torch.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size)
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks, torch.tensor(original_sizes), torch.tensor(reshaped_input_size)
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError):
            masks = processor.post_process_masks(dummy_masks, np.array(original_sizes), np.array(reshaped_input_size))


@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = SamProcessor.from_pretrained(self.tmpdirname, do_normalize=False, padding_value=1.0)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, SamImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        input_feat_extract.pop("original_sizes")  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("reshaped_input_sizes")  # pop reshaped_input_sizes as it is popped in the processor

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    @require_tf
    def test_post_process_masks(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = [tf.ones((1, 3, 5, 5))]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        masks = processor.post_process_masks(dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf")
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        masks = processor.post_process_masks(
            dummy_masks,
            tf.convert_to_tensor(original_sizes),
            tf.convert_to_tensor(reshaped_input_size),
            return_tensors="tf",
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5))]
        masks = processor.post_process_masks(
            dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
        )
        self.assertEqual(masks[0].shape, (1, 3, 1764, 2646))

        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            masks = processor.post_process_masks(
                dummy_masks, np.array(original_sizes), np.array(reshaped_input_size), return_tensors="tf"
            )


@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]

        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
"""simple docstring"""
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "[CLS]",
            "[SEP]",
            "want",
            "unwanted",
            "wa",
            "un",
            "running",
            ",",
            "low",
            "l",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        kwargs["lower_case"] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "<unk> UNwanted , running"
        output_text = "<unk> unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True)

        tokens = tokenizer.tokenize("<unk> UNwanted , running")
        self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7])

    def test_full_tokenizer_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"]
        )

    def test_full_tokenizer_no_lower(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_full_tokenizer_moses_numbers(self):
        tokenizer = TransfoXLTokenizer(lower_case=False)
        text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
        tokens_out = [
            "Hello",
            "(",
            "bracket",
            ")",
            "and",
            "side",
            "@-@",
            "scrolled",
            "[",
            "and",
            "]",
            "Henry",
            "'s",
            "$",
            "5",
            "@,@",
            "000",
            "with",
            "3",
            "@.@",
            "34",
            "m",
            ".",
            "What",
            "'s",
            "up",
            "!",
            "?",
        ]

        self.assertListEqual(tokenizer.tokenize(text_in), tokens_out)

        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in)

    def test_move_added_token(self):
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer)

        tokenizer.add_tokens(["new1", "new2"])
        tokenizer.move_added_token("new1", 1)

        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer), original_len + 2)
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode("new1"), [1])
        self.assertEqual(tokenizer.decode([1]), "new1")
| 61
|
"""simple docstring"""
from __future__ import annotations
_a : List[str] = 10
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : list[int] ) -> list[int]:
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCAmelCase : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCAmelCase : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each bucket's contents back into list_of_ints
_lowerCAmelCase : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
_lowerCAmelCase : Any = i
a += 1
# move to the next digit position
placement *= RADIX
return list_of_ints
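# Illustrative sketch (not from the original source): a de-obfuscated,
# self-contained reading of the routine above. The names `radix_sort_sketch`,
# `placement`, and `buckets` are assumptions about what the obfuscated
# assignments bind; integer division replaces the original float-then-int
# digit extraction, which is equivalent for non-negative integers.
def radix_sort_sketch(list_of_ints: list[int], radix: int = 10) -> list[int]:
    """
    >>> radix_sort_sketch([170, 45, 75, 90, 802, 24, 2, 66])
    [2, 24, 45, 66, 75, 90, 170, 802]
    """
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # distribute values into one bucket per digit, least significant digit first
        buckets: list[list[int]] = [[] for _ in range(radix)]
        for value in list_of_ints:
            buckets[(value // placement) % radix].append(value)
        # concatenate the buckets back; stability per pass makes the sort correct
        index = 0
        for bucket in buckets:
            for value in bucket:
                list_of_ints[index] = value
                index += 1
        placement *= radix
    return list_of_ints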
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 0
|
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase = logging.get_logger(__name__)
# General docstring
UpperCAmelCase = 'RegNetConfig'
# Base docstring
UpperCAmelCase = 'facebook/regnet-y-040'
UpperCAmelCase = [1, 1088, 7, 7]
# Image classification docstring
UpperCAmelCase = 'facebook/regnet-y-040'
UpperCAmelCase = 'tabby, tabby cat'
UpperCAmelCase = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case = 3 , snake_case = 1 , snake_case = 1 , snake_case = "relu" , **snake_case , ):
super().__init__(**a__ )
# The padding and conv have been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowercase = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowercase = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=a__ , strides=a__ , padding='VALID' , groups=a__ , use_bias=a__ , name='convolution' , )
lowercase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
lowercase = ACTaFN[activation] if activation is not None else tf.identity
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.convolution(self.padding(a__ ) )
lowercase = self.normalization(a__ )
lowercase = self.activation(a__ )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , **snake_case ):
super().__init__(**a__ )
lowercase = config.num_channels
lowercase = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = shape_list(a__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowercase = tf.transpose(a__ , perm=(0, 2, 3, 1) )
lowercase = self.embedder(a__ )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case = 2 , **snake_case ):
super().__init__(**a__ )
lowercase = tf.keras.layers.ConvaD(
filters=a__ , kernel_size=1 , strides=a__ , use_bias=a__ , name='convolution' )
lowercase = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = False ):
return self.normalization(self.convolution(a__ ) , training=a__ )
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , **snake_case ):
super().__init__(**a__ )
lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='pooler' )
lowercase = [
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='relu' , name='attention.0' ),
tf.keras.layers.ConvaD(filters=a__ , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
# [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
lowercase = self.pooler(a__ )
for layer_module in self.attention:
lowercase = layer_module(a__ )
lowercase = hidden_state * pooled
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case = 1 , **snake_case ):
super().__init__(**a__ )
lowercase = in_channels != out_channels or stride != 1
lowercase = max(1 , out_channels // config.groups_width )
lowercase = (
TFRegNetShortCut(a__ , stride=a__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowercase = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='layer.2' ),
]
lowercase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = hidden_state
for layer_module in self.layers:
lowercase = layer_module(a__ )
lowercase = self.shortcut(a__ )
hidden_state += residual
lowercase = self.activation(a__ )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case = 1 , **snake_case ):
super().__init__(**a__ )
lowercase = in_channels != out_channels or stride != 1
lowercase = max(1 , out_channels // config.groups_width )
lowercase = (
TFRegNetShortCut(a__ , stride=a__ , name='shortcut' )
if should_apply_shortcut
else tf.keras.layers.Activation('linear' , name='shortcut' )
)
lowercase = [
TFRegNetConvLayer(a__ , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
TFRegNetConvLayer(
a__ , stride=a__ , groups=a__ , activation=config.hidden_act , name='layer.1' ),
TFRegNetSELayer(a__ , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
TFRegNetConvLayer(a__ , kernel_size=1 , activation=a__ , name='layer.3' ),
]
lowercase = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = hidden_state
for layer_module in self.layers:
lowercase = layer_module(a__ )
lowercase = self.shortcut(a__ )
hidden_state += residual
lowercase = self.activation(a__ )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , snake_case , snake_case , snake_case = 2 , snake_case = 2 , **snake_case ):
super().__init__(**a__ )
lowercase = TFRegNetXLayer if config.layer_type == """x""" else TFRegNetYLayer
lowercase = [
# downsampling is done in the first layer with stride of 2
layer(a__ , a__ , a__ , stride=a__ , name='layers.0' ),
*[layer(a__ , a__ , a__ , name=F'''layers.{i+1}''' ) for i in range(depth - 1 )],
]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
for layer_module in self.layers:
lowercase = layer_module(a__ )
return hidden_state
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , snake_case , **snake_case ):
super().__init__(**a__ )
lowercase = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
a__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(a__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(a__ , a__ , a__ , depth=a__ , name=F'''stages.{i+1}''' ) )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = False , snake_case = True ):
lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
lowercase = stage_module(a__ )
if output_hidden_states:
lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=a__ , hidden_states=a__ )
@keras_serializable
class A_ ( tf.keras.layers.Layer ):
'''simple docstring'''
_UpperCamelCase : Tuple = RegNetConfig
def __init__( self , snake_case , **snake_case ):
super().__init__(**a__ )
lowercase = config
lowercase = TFRegNetEmbeddings(a__ , name='embedder' )
lowercase = TFRegNetEncoder(a__ , name='encoder' )
lowercase = tf.keras.layers.GlobalAveragePoolingaD(keepdims=a__ , name='pooler' )
@unpack_inputs
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case = False , ):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.embedder(a__ , training=a__ )
lowercase = self.encoder(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__ )
lowercase = encoder_outputs[0]
lowercase = self.pooler(a__ )
# Change to NCHW output format to have uniformity in the modules
lowercase = tf.transpose(a__ , perm=(0, 3, 1, 2) )
lowercase = tf.transpose(a__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowercase = tuple([tf.transpose(a__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=a__ , pooler_output=a__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = RegNetConfig
_UpperCamelCase : List[str] = "regnet"
_UpperCamelCase : Dict = "pixel_values"
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase = r'\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n'
UpperCAmelCase = r'\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , SCREAMING_SNAKE_CASE_ , )
class A_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , snake_case , *snake_case , **snake_case ):
super().__init__(a__ , *a__ , **a__ )
lowercase = TFRegNetMainLayer(a__ , name='regnet' )
@unpack_inputs
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=a__ , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = None , snake_case=False , ):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.regnet(
pixel_values=a__ , output_hidden_states=a__ , return_dict=a__ , training=a__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
"""\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , SCREAMING_SNAKE_CASE_ , )
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def __init__( self , snake_case , *snake_case , **snake_case ):
super().__init__(a__ , *a__ , **a__ )
lowercase = config.num_labels
lowercase = TFRegNetMainLayer(a__ , name='regnet' )
# classification head
lowercase = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(a__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=a__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE__ ( self , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case=False , ):
lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase = return_dict if return_dict is not None else self.config.use_return_dict
lowercase = self.regnet(
a__ , output_hidden_states=a__ , return_dict=a__ , training=a__ )
lowercase = outputs.pooler_output if return_dict else outputs[1]
lowercase = self.classifier[0](a__ )
lowercase = self.classifier[1](a__ )
lowercase = None if labels is None else self.hf_compute_loss(labels=a__ , logits=a__ )
if not return_dict:
lowercase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=a__ , logits=a__ , hidden_states=outputs.hidden_states )
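# Illustrative usage sketch (not from the original source): the checkpoint name
# reuses the docstring constant above and the shape follows
# _EXPECTED_OUTPUT_SHAPE, so treat this as a hedged sketch rather than a tested call.
#
#   import tensorflow as tf
#   model = TFRegNetModel.from_pretrained("facebook/regnet-y-040")
#   pixel_values = tf.random.uniform((1, 3, 224, 224))  # NCHW; transposed to NHWC internally
#   outputs = model(pixel_values)
#   # outputs.last_hidden_state is transposed back to NCHW: shape (1, 1088, 7, 7)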
| 195
|
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
'stable diffusion controlnet',
'0.22.0',
'Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.',
standard_warn=False,
stacklevel=3,
)
| 44
| 0
|
import os
import pytest
from attr import dataclass
UpperCAmelCase_ = 'us-east-1' # defaults region
@dataclass
class lowercase__ :
'''simple docstring'''
a : str
a : Any = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
a : Tuple = {
"task_name": "mnli",
"per_device_train_batch_size": 16,
"per_device_eval_batch_size": 16,
"do_train": True,
"do_eval": True,
"do_predict": True,
"output_dir": "/opt/ml/model",
"overwrite_output_dir": True,
"max_steps": 500,
"save_steps": 5500,
}
a : List[str] = {**hyperparameters, "max_steps": 1000}
@property
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
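# Illustrative note (not from the original source): on a PyTorch log line such
# as "eval_accuracy = 0.8423", the pattern r"eval_accuracy.*=\D*(.*?)$" skips
# the non-digit characters after '=' and captures "0.8423" as the metric value.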
@property
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
return f"{self.framework}-transfromers-test"
@property
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='''class''' )
def lowerCAmelCase_ ( __UpperCAmelCase: Optional[Any] ) -> List[str]:
UpperCamelCase__ : Optional[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
| 201
|
"""simple docstring"""
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class __A ( SCREAMING_SNAKE_CASE_ ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=False , a__=True , a__="None" , a__=3 , a__=4 , a__=None , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : List[Any] = seq_length
_lowerCAmelCase : Dict = is_training
_lowerCAmelCase : Dict = use_input_mask
_lowerCAmelCase : int = use_token_type_ids
_lowerCAmelCase : int = use_labels
_lowerCAmelCase : Optional[int] = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Tuple = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Tuple = type_sequence_label_size
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : Tuple = relative_attention
_lowerCAmelCase : Tuple = position_biased_input
_lowerCAmelCase : Dict = pos_att_type
_lowerCAmelCase : Any = scope
def __A ( self ):
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
_lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_lowerCAmelCase : str = None
if self.use_token_type_ids:
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Union[str, Any] = None
_lowerCAmelCase : Any = None
if self.use_labels:
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : str = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __A ( self ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __A ( self , a__ ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : List[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : List[Any] = model(a__ , token_type_ids=a__ )[0]
_lowerCAmelCase : Any = model(a__ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[str] = DebertaVaForMaskedLM(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : int = self.num_labels
_lowerCAmelCase : int = DebertaVaForSequenceClassification(a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a__ )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : List[Any] = self.num_labels
_lowerCAmelCase : str = DebertaVaForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Any = DebertaVaForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Dict = model(
a__ , attention_mask=a__ , token_type_ids=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __A ( self , a__ , a__ , a__ , a__ , a__ , a__ , a__ ):
_lowerCAmelCase : Union[str, Any] = DebertaVaForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
_lowerCAmelCase : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : List[str] = model(
a__ , attention_mask=a__ , token_type_ids=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __A ( self ):
_lowerCAmelCase : Tuple = self.prepare_config_and_inputs()
( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) : Union[str, Any] = config_and_inputs
_lowerCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __A ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : Union[str, Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
{
"feature-extraction": DebertaVaModel,
"fill-mask": DebertaVaForMaskedLM,
"question-answering": DebertaVaForQuestionAnswering,
"text-classification": DebertaVaForSequenceClassification,
"token-classification": DebertaVaForTokenClassification,
"zero-shot": DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
_UpperCamelCase : Optional[Any] = True
_UpperCamelCase : List[Any] = False
_UpperCamelCase : List[Any] = False
_UpperCamelCase : Dict = False
_UpperCamelCase : Tuple = False
def __A ( self ):
_lowerCAmelCase : Optional[Any] = DebertaVaModelTester(self )
_lowerCAmelCase : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def __A ( self ):
self.config_tester.run_common_tests()
def __A ( self ):
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*a__ )
def __A ( self ):
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*a__ )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*a__ )
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*a__ )
@slow
def __A ( self ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Tuple = DebertaVaModel.from_pretrained(a__ )
self.assertIsNotNone(a__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@unittest.skip(reason="""Model not available yet""" )
def __A ( self ):
pass
@slow
def __A ( self ):
_lowerCAmelCase : Union[str, Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_lowerCAmelCase : Dict = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(a__ , attention_mask=a__ )[0]
# compare the actual values for a slice.
_lowerCAmelCase : str = torch.tensor(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a__ , atol=1e-4 ) , F"{output[:, 1:4, 1:4]}" )
| 44
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
lowerCamelCase = logging.get_logger(__name__)
@dataclass
class _a ( SCREAMING_SNAKE_CASE_):
_a : List[str] = [
"no_inference",
"no_cuda",
"no_tpu",
"no_speed",
"no_memory",
"no_env_print",
"no_multi_process",
]
def __init__( self : Dict , **_SCREAMING_SNAKE_CASE : List[Any] )-> List[Any]:
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
lowerCAmelCase__ : Tuple = deprecated_arg[3:]
setattr(self , a__ , not kwargs.pop(a__ ) )
logger.warning(
F'{deprecated_arg} is deprecated. Please use --no_{positive_arg} or'
F' {positive_arg}={kwargs[positive_arg]}' )
lowerCAmelCase__ : List[Any] = kwargs.pop('''torchscript''' , self.torchscript )
lowerCAmelCase__ : List[str] = kwargs.pop('''torch_xla_tpu_print_metrics''' , self.torch_xla_tpu_print_metrics )
lowerCAmelCase__ : List[str] = kwargs.pop('''fp16_opt_level''' , self.fpaa_opt_level )
super().__init__(**a__ )
_a : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Trace the models using torchscript'''})
_a : bool = field(default=SCREAMING_SNAKE_CASE_ , metadata={'''help''': '''Print Xla/PyTorch tpu metrics'''})
_a : str = field(
default='''O1''' , metadata={
'''help''': (
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '''
'''See details at https://nvidia.github.io/apex/amp.html'''
)
} , )
@cached_property
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
requires_backends(self , ['''torch'''] )
logger.info('''PyTorch: setting up devices''' )
if not self.cuda:
lowerCAmelCase__ : int = torch.device('''cpu''' )
lowerCAmelCase__ : Union[str, Any] = 0
elif is_torch_tpu_available():
lowerCAmelCase__ : str = xm.xla_device()
lowerCAmelCase__ : Optional[Any] = 0
else:
lowerCAmelCase__ : Union[str, Any] = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' )
lowerCAmelCase__ : Optional[Any] = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
return is_torch_tpu_available() and self.tpu
@property
def UpperCAmelCase__( self : Tuple )-> str:
requires_backends(self , ['''torch'''] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCAmelCase__( self : Optional[int] )-> str:
requires_backends(self , ['''torch'''] )
return self._setup_devices[0]
@property
def UpperCAmelCase__( self : Dict )-> Optional[Any]:
requires_backends(self , ['''torch'''] )
return self._setup_devices[1]
@property
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
return self.n_gpu > 0
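# Illustrative usage sketch (not from the original source): the deprecated-
# argument handling in __init__ pops a legacy `no_*` flag, negates it, and
# stores it under the positive name. The class name below is an assumption for
# the obfuscated definition above.
#
#   args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
#   # logs a deprecation warning and effectively sets args.cuda = False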
| 131
|
"""simple docstring"""
import numpy as np
import qiskit
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int = 8 ,_lowerCamelCase : int | None = None ) -> str:
_lowerCAmelCase : int = np.random.default_rng(seed=_lowerCamelCase )
# Alice's and Bob's bases agree on roughly half of the positions, and only
# those positions contribute key bits, so we prepare more qubits than we need.
_lowerCAmelCase : Tuple = 6 * key_len
# Measurement basis for Alice's qubits.
_lowerCAmelCase : Dict = rng.integers(2 ,size=_lowerCamelCase )
# The set of states Alice will prepare.
_lowerCAmelCase : Tuple = rng.integers(2 ,size=_lowerCamelCase )
# Measurement basis for Bob's qubits.
_lowerCAmelCase : Union[str, Any] = rng.integers(2 ,size=_lowerCamelCase )
# Quantum Circuit to simulate BB84
_lowerCAmelCase : Dict = qiskit.QuantumCircuit(_lowerCamelCase ,name="""BB84""" )
# Alice prepares her qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if alice_state[index] == 1:
bbaa_circ.x(_lowerCamelCase )
if alice_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
# Bob measures the received qubits according to rules above.
for index, _ in enumerate(_lowerCamelCase ):
if bob_basis[index] == 1:
bbaa_circ.h(_lowerCamelCase )
bbaa_circ.barrier()
bbaa_circ.measure_all()
# Simulate the quantum circuit.
_lowerCAmelCase : int = qiskit.Aer.get_backend("""aer_simulator""" )
# We only need to run one shot because the key is unique.
# Multiple shots will produce the same key.
_lowerCAmelCase : List[str] = qiskit.execute(_lowerCamelCase ,_lowerCamelCase ,shots=1 ,seed_simulator=_lowerCamelCase )
# Returns the result of measurement.
_lowerCAmelCase : List[Any] = job.result().get_counts(_lowerCamelCase ).most_frequent()
# Extracting the generated key from the simulation results.
# Only keep measurement results where Alice and Bob chose the same basis.
_lowerCAmelCase : str = """""".join(
[
result_bit
for alice_basis_bit, bob_basis_bit, result_bit in zip(
_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
if alice_basis_bit == bob_basis_bit
] )
# Get final key. Pad with 0 if too short, otherwise truncate.
_lowerCAmelCase : List[Any] = gen_key[:key_len] if len(_lowerCamelCase ) >= key_len else gen_key.ljust(_lowerCamelCase ,"""0""" )
return key
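# Illustrative sketch (not from the original source): the basis-sifting step
# above, isolated from qiskit. A key bit survives only where Alice's
# preparation basis equals Bob's measurement basis, which happens on roughly
# half of the positions. The names are assumptions for the obfuscated code.
def sift_key(alice_basis: list[int], bob_basis: list[int], measured_bits: str) -> str:
    return "".join(
        bit
        for a_basis, b_basis, bit in zip(alice_basis, bob_basis, measured_bits)
        if a_basis == b_basis
    )

assert sift_key([0, 1, 1, 0], [0, 0, 1, 1], "1011") == "11"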
if __name__ == "__main__":
print(F"""The generated key is : {bbaa(8, seed=0)}""")
from doctest import testmod
testmod()
| 44
| 0
|
'''simple docstring'''
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ) -> bool:
__lowerCamelCase : Union[str, Any] = len(_lowerCamelCase ) + 1
__lowerCamelCase : Any = len(_lowerCamelCase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
__lowerCamelCase : Union[str, Any] = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
# since a string of zero length matches a pattern of zero length
__lowerCamelCase : Tuple = 1
# since a pattern of zero length will never match a string of non-zero length
for i in range(1 ,_lowerCamelCase ):
__lowerCamelCase : List[str] = 0
# a string of zero length matches only pattern prefixes in which every
# character is immediately followed by '*' (each such pair matches zero chars)
for j in range(1 ,_lowerCamelCase ):
__lowerCamelCase : Any = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 ,_lowerCamelCase ):
for j in range(1 ,_lowerCamelCase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
__lowerCamelCase : int = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
__lowerCamelCase : Any = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
__lowerCamelCase : Optional[Any] = dp[i - 1][j]
else:
__lowerCamelCase : Dict = 0
else:
__lowerCamelCase : str = 0
return bool(dp[-1][-1] )
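# Illustrative sketch (not from the original source): a de-obfuscated,
# self-contained reading of the DP above; the variable names are assumptions
# about what the obfuscated assignments bind. dp[i][j] is 1 iff the first i
# characters of the string match the first j characters of the pattern.
def match_pattern_sketch(input_string: str, pattern: str) -> bool:
    rows, cols = len(input_string) + 1, len(pattern) + 1
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = 1  # the empty string matches the empty pattern
    for j in range(1, cols):
        # the empty string matches only pattern prefixes built from "x*" pairs
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    for i in range(1, rows):
        for j in range(1, cols):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2]:  # the starred character occurs zero times
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]  # one more occurrence of it
    return bool(dp[-1][-1])

assert match_pattern_sketch("aab", "c*a*b")    # "c*" matches zero 'c's
assert not match_pattern_sketch("aab", "c*a")  # the trailing 'b' stays unmatched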
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase = 'aab'
_UpperCamelCase = 'c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
| 208
|
"""simple docstring"""
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_a : Union[str, Any] = re.compile('[^A-Za-z_0-9]')
# parameters used in DuplicationIndex
_a : List[str] = 10
_a : List[Any] = 256
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ) -> Optional[MinHash]:
if len(_lowerCamelCase ) < MIN_NUM_TOKENS:
return None
_lowerCAmelCase : Optional[Any] = MinHash(num_perm=_lowerCamelCase )
for token in set(_lowerCamelCase ):
min_hash.update(token.encode() )
return min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ) -> Set[str]:
return {t for t in NON_ALPHA.split(_lowerCamelCase ) if len(t.strip() ) > 0}
class __A :
def __init__( self , *,
a__ = 0.8_5 , ):
_lowerCAmelCase : List[Any] = duplication_jaccard_threshold
_lowerCAmelCase : Union[str, Any] = NUM_PERM
_lowerCAmelCase : Optional[int] = MinHashLSH(threshold=self._duplication_jaccard_threshold , num_perm=self._num_perm )
_lowerCAmelCase : Optional[int] = defaultdict(a__ )
def __A ( self , a__ , a__ ):
_lowerCAmelCase : Optional[Any] = self._index.query(a__ )
if code_key in self._index.keys:
print(F"Duplicate key {code_key}" )
return
self._index.insert(a__ , a__ )
if len(a__ ) > 0:
for base_duplicate in close_duplicates:
if base_duplicate in self._duplicate_clusters:
self._duplicate_clusters[base_duplicate].add(a__ )
break
else:
self._duplicate_clusters[close_duplicates[0]].add(a__ )
def __A ( self ):
_lowerCAmelCase : int = []
for base, duplicates in self._duplicate_clusters.items():
_lowerCAmelCase : List[str] = [base] + list(a__ )
# reformat the cluster to be a list of dict
_lowerCAmelCase : List[Any] = [{"""base_index""": el[0], """repo_name""": el[1], """path""": el[2]} for el in cluster]
duplicate_clusters.append(a__ )
return duplicate_clusters
def __A ( self , a__ ):
_lowerCAmelCase : Dict = self.get_duplicate_clusters()
with open(a__ , """w""" ) as f:
json.dump(a__ , a__ )
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] ) -> Tuple:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : Tuple = get_min_hash([t for t in NON_ALPHA.split(data["""content"""] ) if len(t.strip() ) > 0] )
if min_hash is not None:
return (index, data["repo_name"], data["path"]), min_hash
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ) -> Optional[Any]:
with mp.Pool() as pool:
for data in pool.imap_unordered(
_compute_min_hash ,ThreadedIterator(_lowerCamelCase ,max_queue_size=10000 ) ,chunksize=100 ,):
if data is not None:
yield data
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float ) -> List[str]:
_lowerCAmelCase : Optional[Any] = DuplicationIndex(duplication_jaccard_threshold=_lowerCamelCase )
for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(_lowerCamelCase ) ) ,max_queue_size=100 ) ):
di.add(_lowerCamelCase ,_lowerCamelCase )
# Returns a List[Cluster] where Cluster is List[str] with the filenames.
return di.get_duplicate_clusters()
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : str ,_lowerCamelCase : str ) -> float:
_lowerCAmelCase : Any = get_tokens(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = get_tokens(_lowerCamelCase )
return len(tokensa & tokensa ) / len(tokensa | tokensa )
_a : str = None
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : int ,_lowerCamelCase : List[Any] ) -> Dict:
_lowerCAmelCase : int = []
for elementa in cluster:
_lowerCAmelCase : Dict = _shared_dataset[elementa["""base_index"""]]["""content"""]
for elementa in extremes:
_lowerCAmelCase : Any = _shared_dataset[elementa["""base_index"""]]["""content"""]
if jaccard_similarity(_lowerCamelCase ,_lowerCamelCase ) >= jaccard_threshold:
elementa["copies"] += 1
break
else:
_lowerCAmelCase : Any = 1
extremes.append(_lowerCamelCase )
return extremes
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[Any] ,_lowerCamelCase : Any ) -> str:
global _shared_dataset
_lowerCAmelCase : Tuple = dataset
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Optional[Any] = partial(_find_cluster_extremes_shared ,jaccard_threshold=_lowerCamelCase )
with mp.Pool() as pool:
for extremes in tqdm(
pool.imap_unordered(
_lowerCamelCase ,_lowerCamelCase ,) ,total=len(_lowerCamelCase ) ,):
extremes_list.append(_lowerCamelCase )
return extremes_list
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Type[Dataset] ,_lowerCamelCase : float = 0.85 ) -> Tuple[Type[Dataset], List[List[Dict]]]:
_lowerCAmelCase : Tuple = make_duplicate_clusters(_lowerCamelCase ,_lowerCamelCase )
_lowerCAmelCase : str = {x["""base_index"""] for cluster in duplicate_clusters for x in cluster}
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : Tuple = find_extremes(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
for extremes in extremes_clusters:
for element in extremes:
_lowerCAmelCase : Union[str, Any] = element
_lowerCAmelCase : List[Any] = duplicate_indices - set(extreme_dict.keys() )
_lowerCAmelCase : List[Any] = dataset.filter(lambda _lowerCamelCase ,_lowerCamelCase : idx not in remove_indices ,with_indices=_lowerCamelCase )
# update duplicate_clusters
for cluster in duplicate_clusters:
for element in cluster:
_lowerCAmelCase : Tuple = element["""base_index"""] in extreme_dict
if element["is_extreme"]:
_lowerCAmelCase : Dict = extreme_dict[element["""base_index"""]]["""copies"""]
print(f"Original dataset size: {len(_lowerCamelCase )}" )
print(f"Number of duplicate clusters: {len(_lowerCamelCase )}" )
print(f"Files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Unique files in duplicate cluster: {len(_lowerCamelCase )}" )
print(f"Filtered dataset size: {len(_lowerCamelCase )}" )
return ds_filter, duplicate_clusters
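# Illustrative sketch (not from the original source): the token-level Jaccard
# similarity used above for extreme detection, isolated and de-obfuscated; the
# helper names are assumptions for the obfuscated definitions.
import re as _re

_NON_ALPHA = _re.compile("[^A-Za-z_0-9]")

def get_tokens_sketch(code: str) -> set[str]:
    return {t for t in _NON_ALPHA.split(code) if len(t.strip()) > 0}

def jaccard_similarity_sketch(code_a: str, code_b: str) -> float:
    tokens_a, tokens_b = get_tokens_sketch(code_a), get_tokens_sketch(code_b)
    return len(tokens_a & tokens_b) / len(tokens_a | tokens_b)

# {"def", "f", "x", "return"} vs {"def", "g", "x", "return"}: 3 shared of 5 total
assert jaccard_similarity_sketch("def f(x): return x", "def g(x): return x") == 0.6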
| 44
| 0
|
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 20
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
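# Illustrative note (not from the original source): with the defaults in
# __init__ above, embed_dim=96 and depths=[2, 2, 6, 2] (four stages) give
# hidden_size = 96 * 2 ** (4 - 1) = 768, the channel width after the last stage.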
| 44
| 0
|
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=True , _UpperCAmelCase=99 , _UpperCAmelCase=64 , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=64 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=512 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=3 , _UpperCAmelCase=4 , _UpperCAmelCase=None , ):
__a : Optional[int] = parent
__a : Optional[Any] = batch_size
__a : Optional[int] = seq_length
__a : str = is_training
__a : List[str] = use_input_mask
__a : Union[str, Any] = use_token_type_ids
__a : Any = use_labels
__a : Any = vocab_size
__a : str = hidden_size
__a : str = num_hidden_layers
__a : Union[str, Any] = num_attention_heads
__a : Optional[int] = intermediate_size
__a : Tuple = hidden_act
__a : int = hidden_dropout_prob
__a : List[Any] = attention_probs_dropout_prob
__a : str = max_position_embeddings
__a : int = type_vocab_size
__a : Any = type_sequence_label_size
__a : List[str] = initializer_range
__a : List[Any] = num_labels
__a : Optional[Any] = num_choices
__a : Optional[Any] = scope
def _lowerCamelCase ( self ):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' )
def _lowerCamelCase ( self ):
__a : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a : Dict = None
if self.use_input_mask:
__a : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__a : Optional[Any] = None
__a : Optional[Any] = None
__a : List[Any] = None
if self.use_labels:
__a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__a : int = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : List[Any] = MPNetModel(config=a__ )
model.to(a__ )
model.eval()
__a : Union[str, Any] = model(a__ , a__ )
__a : int = model(a__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = MPNetForQuestionAnswering(config=a__ )
model.to(a__ )
model.eval()
__a : str = model(
a__ , attention_mask=a__ , start_positions=a__ , end_positions=a__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = self.num_labels
__a : Union[str, Any] = MPNetForSequenceClassification(a__ )
model.to(a__ )
model.eval()
__a : List[str] = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.num_choices
__a : Tuple = MPNetForMultipleChoice(config=a__ )
model.to(a__ )
model.eval()
__a : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__a : Optional[Any] = model(
a__ , attention_mask=a__ , labels=a__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Union[str, Any] = self.num_labels
__a : List[str] = MPNetForTokenClassification(config=a__ )
model.to(a__ )
model.eval()
__a : Dict = model(a__ , attention_mask=a__ , labels=a__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self ):
__a : Tuple = self.prepare_config_and_inputs()
( __a , __a , __a , __a , __a , __a ) : int = config_and_inputs
__a : str = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __lowercase ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__lowerCAmelCase = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = True
def _lowerCamelCase ( self ):
__a : int = MPNetModelTester(self )
__a : Any = ConfigTester(self , config_class=a__ , hidden_size=37 )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
__a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*a__ )
def _lowerCamelCase ( self ):
__a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*a__ )
def _lowerCamelCase ( self ):
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*a__ )
def _lowerCamelCase ( self ):
__a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*a__ )
def _lowerCamelCase ( self ):
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*a__ )
@require_torch
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
__a : Optional[Any] = MPNetModel.from_pretrained('''microsoft/mpnet-base''' )
__a : str = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__a : Optional[int] = model(a__ )[0]
__a : List[Any] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , a__ )
__a : str = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
| 160
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 0
|
"""simple docstring"""
from __future__ import annotations
UpperCAmelCase_ : List[str] = 10
def _A (__a ) -> list[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = 1
SCREAMING_SNAKE_CASE_ : Union[str, Any] = max(_lowerCamelCase )
while placement <= max_digit:
# declare and initialize empty buckets
SCREAMING_SNAKE_CASE_ : list[list] = [[] for _ in range(_lowerCamelCase )]
# split list_of_ints between the buckets
for i in list_of_ints:
SCREAMING_SNAKE_CASE_ : Tuple = int((i / placement) % RADIX )
buckets[tmp].append(_lowerCamelCase )
# put each bucket's contents back into list_of_ints
SCREAMING_SNAKE_CASE_ : List[str] = 0
for b in range(_lowerCamelCase ):
for i in buckets[b]:
SCREAMING_SNAKE_CASE_ : Any = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
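
# A minimal usage sketch (illustrative; the input list is an assumption):
#
#     >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#     [2, 24, 45, 66, 75, 90, 170, 802]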
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate the entropy of the softmax distribution defined by the logits ``x``."""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
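
# A sanity check of the closed form above (illustrative only, not part of the
# original module): with p_i = exp(x_i) / A, the Shannon entropy
# -sum_i(p_i * log(p_i)) simplifies to log(A) - B / A, so `entropy` should
# agree with a direct softmax computation:
#
#     logits = torch.tensor([[1.0, 2.0, 3.0]])
#     probs = torch.softmax(logits, dim=1)
#     assert torch.allclose(entropy(logits), -(probs * probs.log()).sum(dim=1))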
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)

        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    "The Bert Model transformer with early exiting (DeeBERT). ",
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A highway (off-ramp) head: a shortcut from the output of a non-final BertLayer to the classifier."""

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    """Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. """,
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
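
# A minimal early-exit usage sketch (illustrative only; the config and the 0.5
# entropy threshold are assumptions, not part of this module):
#
#     model = DeeBertForSequenceClassification(config)
#     model.bert.encoder.set_early_exit_entropy(0.5)  # exit once a highway head is confident
#     model.eval()
#     outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
#     loss, logits = outputs[0], outputs[1]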
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    """Greedily fill a knapsack of the given capacity, allowing fractional items."""
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
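
# A minimal usage sketch (illustrative; the item values are assumptions):
#
#     >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#     (240.0, [1, 1, 0.6666666666666666])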
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
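
# A minimal usage sketch (illustrative; the "squad" dataset name is an
# assumption, not something this module references):
#
#     from huggingface_hub import HfApi
#     fs = HfFileSystem(repo_info=HfApi().dataset_info("squad"))
#     print(fs.ls(""))  # top-level files and directories of the repository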
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
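
# A minimal usage sketch outside the test harness (illustrative; the 224-pixel
# sizes are assumptions, not values taken from this test file):
#
#     from PIL import Image
#     from transformers import PoolFormerImageProcessor
#     processor = PoolFormerImageProcessor(size={"shortest_edge": 224}, crop_size={"height": 224, "width": 224})
#     pixel_values = processor(images=Image.new("RGB", (640, 480)), return_tensors="pt").pixel_values
#     print(pixel_values.shape)  # torch.Size([1, 3, 224, 224])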
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )

        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()

        return text_encoder

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"


@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)

        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt,
            image=init_image,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            strength=0.2,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Return a wrapper that runs ``func`` and returns its wall-clock duration in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
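
# A minimal usage sketch (illustrative; the feature spec and temporary path are
# assumptions):
#
#     import tempfile
#     features = datasets.Features({"text": datasets.Value("string")})
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         dataset = generate_example_dataset(f"{tmp_dir}/dummy.arrow", features, num_examples=10)
#         print(len(dataset))  # 10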
from math import ceil
def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
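
# A minimal usage sketch (illustrative; the layer count and device ids are
# assumptions):
#
#     device_map = get_device_map(n_layers=12, devices=[0, 1])
#     # {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
#     assert_device_map(device_map, num_blocks=12)  # raises ValueError on a bad map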