| code (string, 81–54k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
'''Lorentz transformation of a four-vector under a boost along the x-axis.'''
from __future__ import annotations

from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray | None = None) -> np.ndarray:
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 30 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
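The helpers exercised above come from the repository's seq2seq examples; for reference, a minimal sketch of the underlying metric using the `rouge_score` package (that `calculate_rouge` wraps this package is our assumption):

```python
from rouge_score import rouge_scorer

# Score one prediction against one reference for the same keys used in the tests above.
scorer = rouge_scorer.RougeScorer(["rouge2", "rougeLsum"], use_stemmer=True)
scores = scorer.score(target="The cat sat on the mat.", prediction="The cat is on the mat.")
print(scores["rouge2"].fmeasure, scores["rougeLsum"].fmeasure)
```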
| 30 | 1 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : List[str] ) -> List[str]:
# ===== initialization =====
lowercase_ : List[Any] = Mock()
lowercase_ : str = conn, Mock()
lowercase_ : str = iter([1, None] )
lowercase_ : Optional[Any] = lambda UpperCAmelCase__ : next(UpperCAmelCase__ )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=UpperCAmelCase__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
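For context, a minimal sketch of the kind of `send_file` the mocked assertions above exercise; this is not the repository's implementation, just one shape that would satisfy the mocks:

```python
import socket

def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    # 'testing' mirrors the real function's signature; it is unused in this sketch.
    port = 12312  # arbitrary illustrative port
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", port))
    sock.listen(1)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()
```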
| 30 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
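The class above mirrors the public `Speech2TextConfig`; a minimal sketch of instantiating it through the released transformers API (assuming the library is installed):

```python
from transformers import Speech2TextConfig

# Defaults follow the released config; only a few fields are overridden here.
config = Speech2TextConfig(vocab_size=10000, encoder_layers=12, decoder_layers=6)
print(config.d_model, config.num_conv_layers, config.conv_kernel_sizes)
```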
| 30 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __magic_name__ :
UpperCamelCase__ = PegasusConfig
UpperCamelCase__ = {}
UpperCamelCase__ = '''gelu'''
def __init__( self : List[Any] , lowercase_ : int , lowercase_ : Optional[int]=13 , lowercase_ : Dict=7 , lowercase_ : Tuple=True , lowercase_ : List[str]=False , lowercase_ : Dict=99 , lowercase_ : Any=32 , lowercase_ : Union[str, Any]=2 , lowercase_ : str=4 , lowercase_ : Optional[int]=37 , lowercase_ : List[Any]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=40 , lowercase_ : Any=2 , lowercase_ : Optional[int]=1 , lowercase_ : int=0 , ):
lowercase_ : int = parent
lowercase_ : int = batch_size
lowercase_ : str = seq_length
lowercase_ : List[Any] = is_training
lowercase_ : List[Any] = use_labels
lowercase_ : Optional[Any] = vocab_size
lowercase_ : List[Any] = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : Optional[int] = num_attention_heads
lowercase_ : Tuple = intermediate_size
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Optional[Any] = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Union[str, Any] = eos_token_id
lowercase_ : Any = pad_token_id
lowercase_ : Tuple = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowercase_ : int = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowercase_ : Dict = tf.concat([input_ids, eos_tensor] , axis=1 )
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[str] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowercase_ : int = prepare_pegasus_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : Union[str, Any] ):
lowercase_ : Any = TFPegasusModel(config=lowercase_ ).get_decoder()
lowercase_ : Optional[Any] = inputs_dict["""input_ids"""]
lowercase_ : Dict = input_ids[:1, :]
lowercase_ : int = inputs_dict["""attention_mask"""][:1, :]
lowercase_ : Dict = inputs_dict["""head_mask"""]
lowercase_ : str = 1
# first forward pass
lowercase_ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ )
lowercase_ , lowercase_ : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase_ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowercase_ : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowercase_ : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowercase_ : Optional[Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowercase_ : int = model(lowercase_ , attention_mask=lowercase_ )[0]
lowercase_ : int = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowercase_ : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowercase_ : int = output_from_no_past[:, -3:, random_slice_idx]
lowercase_ : Dict = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 )
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : Optional[int]=None , ) -> Dict:
if attention_mask is None:
lowercase_ : List[Any] = tf.cast(tf.math.not_equal(UpperCAmelCase__ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowercase_ : int = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowercase_ : Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowercase_ : Any = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowercase_ : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
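The masks built in the helper above follow the usual pad-token pattern; a small self-contained sketch of that pattern (assuming TensorFlow is installed):

```python
import tensorflow as tf

pad_token_id = 0
input_ids = tf.constant([[5, 6, 0, 0]])
# 1 where the token is real, 0 where it is padding.
attention_mask = tf.cast(tf.math.not_equal(input_ids, pad_token_id), tf.int32)
print(attention_mask.numpy())  # [[1 1 0 0]]
```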
@require_tf
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
UpperCamelCase__ = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
UpperCamelCase__ = (
{
'''conversational''': TFPegasusForConditionalGeneration,
'''feature-extraction''': TFPegasusModel,
'''summarization''': TFPegasusForConditionalGeneration,
'''text2text-generation''': TFPegasusForConditionalGeneration,
'''translation''': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Dict = TFPegasusModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __magic_name__ ( unittest.TestCase):
UpperCamelCase__ = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
UpperCamelCase__ = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
UpperCamelCase__ = '''google/pegasus-xsum'''
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : Any ):
lowercase_ : List[Any] = self.translate_src_text(**lowercase_ )
assert self.expected_text == generated_words
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , **lowercase_ : List[Any] ):
lowercase_ : Optional[Any] = self.tokenizer(self.src_text , **lowercase_ , padding=lowercase_ , return_tensors="""tf""" )
lowercase_ : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase_ , )
lowercase_ : Union[str, Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase_ )
return generated_words
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self._assert_generated_batch_equal_expected()
| 30 |
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
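For reference, a sketch of the public API the integration test above exercises (assumes transformers, torch, and access to the facebook/timesformer-base-finetuned-k400 checkpoint; the video frames here are random placeholders):

```python
import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")

video = [np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8) for _ in range(8)]
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (1, 400), Kinetics-400 classes
```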
| 30 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=13 , lowercase_ : Tuple=7 , lowercase_ : Tuple=True , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : Dict=True , lowercase_ : Union[str, Any]=99 , lowercase_ : Optional[int]=24 , lowercase_ : int=2 , lowercase_ : List[str]=6 , lowercase_ : int=37 , lowercase_ : Tuple="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : List[str]=512 , lowercase_ : Any=16 , lowercase_ : Union[str, Any]=2 , lowercase_ : Any=0.02 , lowercase_ : Optional[Any]=3 , lowercase_ : List[Any]=None , lowercase_ : List[Any]=1000 , ):
lowercase_ : Any = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[Any] = seq_length
lowercase_ : Tuple = is_training
lowercase_ : str = use_input_mask
lowercase_ : Union[str, Any] = use_token_type_ids
lowercase_ : Any = use_labels
lowercase_ : Tuple = vocab_size
lowercase_ : Any = hidden_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Optional[Any] = intermediate_size
lowercase_ : str = hidden_act
lowercase_ : Union[str, Any] = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : Any = max_position_embeddings
lowercase_ : str = type_vocab_size
lowercase_ : Optional[Any] = type_sequence_label_size
lowercase_ : Optional[int] = initializer_range
lowercase_ : Optional[int] = num_labels
lowercase_ : List[str] = scope
lowercase_ : List[str] = range_bbox
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : Any = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
lowercase_ : Union[str, Any] = bbox[i, j, 3]
lowercase_ : Any = bbox[i, j, 1]
lowercase_ : Any = t
if bbox[i, j, 2] < bbox[i, j, 0]:
lowercase_ : int = bbox[i, j, 2]
lowercase_ : Optional[Any] = bbox[i, j, 0]
lowercase_ : int = t
lowercase_ : Any = None
if self.use_input_mask:
lowercase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowercase_ : int = None
if self.use_token_type_ids:
lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : List[Any] = None
lowercase_ : Any = None
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Dict = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE_ ( self : str ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] , ):
lowercase_ : Any = LiltModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ )
lowercase_ : List[Any] = model(lowercase_ , bbox=lowercase_ , token_type_ids=lowercase_ )
lowercase_ : str = model(lowercase_ , bbox=lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : str , lowercase_ : int , ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Optional[int] = LiltForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , ):
lowercase_ : Optional[Any] = LiltForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : str = model(
lowercase_ , bbox=lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Optional[Any] = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : int = config_and_inputs
lowercase_ : Optional[int] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': LiltModel,
'''question-answering''': LiltForQuestionAnswering,
'''text-classification''': LiltForSequenceClassification,
'''token-classification''': LiltForTokenClassification,
'''zero-shot''': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : str , lowercase_ : Any ):
return True
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = LiltModelTester(self )
lowercase_ : List[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowercase_ : List[str] = type
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Tuple = LiltModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@require_torch
@slow
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Dict = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(lowercase_ )
lowercase_ : Optional[int] = torch.tensor([[1, 2]] , device=lowercase_ )
lowercase_ : Any = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : List[str] = model(input_ids=lowercase_ , bbox=lowercase_ )
lowercase_ : Union[str, Any] = torch.Size([1, 2, 768] )
lowercase_ : Dict = torch.tensor(
[[-0.06_53, 0.09_50, -0.00_61], [-0.05_45, 0.09_26, -0.03_24]] , device=lowercase_ , )
self.assertTrue(outputs.last_hidden_state.shape , lowercase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , lowercase_ , atol=1E-3 ) )
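A sketch of the same check through the released API (assumes transformers, torch, and access to the SCUT-DLVCLab/lilt-roberta-en-base checkpoint):

```python
import torch
from transformers import LiltModel

model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base")
input_ids = torch.tensor([[1, 2]])
bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]])  # one (x0, y0, x1, y1) box per token
with torch.no_grad():
    outputs = model(input_ids=input_ids, bbox=bbox)
print(outputs.last_hidden_state.shape)  # torch.Size([1, 2, 768])
```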
| 30 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 1
|
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_lowercase : Optional[List[str]] = None
_lowercase : Tuple = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1", whose values are not preserved correctly when an image is saved and loaded back
_lowercase : List[Any] = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class __magic_name__ :
UpperCamelCase__ = True
UpperCamelCase__ = None
# Automatically constructed
UpperCamelCase__ = "PIL.Image.Image"
UpperCamelCase__ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()})
UpperCamelCase__ = field(default='''Image''', init=_UpperCAmelCase, repr=_UpperCAmelCase)
def __call__( self : Optional[int] ):
return self.pa_type
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : str = np.array(lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
return {"path": value, "bytes": None}
elif isinstance(lowercase_ , lowercase_ ):
return {"path": None, "bytes": value}
elif isinstance(lowercase_ , np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(lowercase_ )
elif isinstance(lowercase_ , PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(lowercase_ )
elif value.get("""path""" ) is not None and os.path.isfile(value["""path"""] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get("""path""" )}
elif value.get("""bytes""" ) is not None or value.get("""path""" ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get("""bytes""" ), "path": value.get("""path""" )}
else:
raise ValueError(
f'''An image sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.''' )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : dict , lowercase_ : Any=None ):
if not self.decode:
raise RuntimeError("""Decoding is disabled for this feature. Please use Image(decode=True) instead.""" )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support decoding images, please install 'Pillow'.""" )
if token_per_repo_id is None:
lowercase_ : Any = {}
lowercase_ , lowercase_ : Optional[Any] = value["""path"""], value["""bytes"""]
if bytes_ is None:
if path is None:
raise ValueError(f'''An image should have one of \'path\' or \'bytes\' but both are None in {value}.''' )
else:
if is_local_path(lowercase_ ):
lowercase_ : List[str] = PIL.Image.open(lowercase_ )
else:
lowercase_ : Any = path.split("""::""" )[-1]
try:
lowercase_ : Optional[Any] = string_to_dict(lowercase_ , config.HUB_DATASETS_URL )["""repo_id"""]
lowercase_ : Tuple = token_per_repo_id.get(lowercase_ )
except ValueError:
lowercase_ : Tuple = None
with xopen(lowercase_ , """rb""" , use_auth_token=lowercase_ ) as f:
lowercase_ : Any = BytesIO(f.read() )
lowercase_ : str = PIL.Image.open(bytes_ )
else:
lowercase_ : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value("""binary""" ),
"path": Value("""string""" ),
}
)
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
lowercase_ : Any = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
lowercase_ : Tuple = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
lowercase_ : Union[str, Any] = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index("""bytes""" ) >= 0:
lowercase_ : List[str] = storage.field("""bytes""" )
else:
lowercase_ : str = pa.array([None] * len(lowercase_ ) , type=pa.binary() )
if storage.type.get_field_index("""path""" ) >= 0:
lowercase_ : List[str] = storage.field("""path""" )
else:
lowercase_ : Tuple = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : int = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
lowercase_ : List[Any] = pa.array(
[encode_np_array(np.array(lowercase_ ) )["""bytes"""] if arr is not None else None for arr in storage.to_pylist()] , type=pa.binary() , )
lowercase_ : Dict = pa.array([None] * len(lowercase_ ) , type=pa.string() )
lowercase_ : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(lowercase_ : Any ):
with xopen(lowercase_ , """rb""" ) as f:
lowercase_ : Any = f.read()
return bytes_
lowercase_ : Dict = pa.array(
[
(path_to_bytes(x["""path"""] ) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
lowercase_ : int = pa.array(
[os.path.basename(lowercase_ ) if path is not None else None for path in storage.field("""path""" ).to_pylist()] , type=pa.string() , )
lowercase_ : List[str] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null() )
return array_cast(lowercase_ , self.pa_type )
def lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
lowercase_ : Union[str, Any] = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> bytes:
lowercase_ : List[str] = BytesIO()
if image.format in list_image_compression_formats():
lowercase_ : int = image.format
else:
lowercase_ : str = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(UpperCAmelCase__ , format=UpperCAmelCase__ )
return buffer.getvalue()
def lowerCamelCase ( UpperCAmelCase__ : "PIL.Image.Image" ) -> dict:
if hasattr(UpperCAmelCase__ , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
lowercase_ : Dict = array.dtype
lowercase_ : Dict = dtype.byteorder if dtype.byteorder != """=""" else _NATIVE_BYTEORDER
lowercase_ : Any = dtype.kind
lowercase_ : Tuple = dtype.itemsize
lowercase_ : Any = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
lowercase_ : Dict = np.dtype("""|u1""" )
if dtype_kind not in ["u", "i"]:
raise TypeError(
F'''Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.''' )
if dtype is not dest_dtype:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
lowercase_ : List[Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
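        # e.g. a default int64 array ("<i8" on little-endian machines) is not a valid
        # image dtype, so the loop below retries with half the itemsize: "<i8" -> "<i4",
        # which is in the allowed list, and the array is downcast to 32-bit integers.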
while dtype_itemsize >= 1:
lowercase_ : int = dtype_byteorder + dtype_kind + str(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = np.dtype(UpperCAmelCase__ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(F'''Downcasting array dtype {dtype} to {dest_dtype} to be compatible with \'Pillow\'''' )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
F'''Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}''' )
lowercase_ : Optional[Any] = PIL.Image.fromarray(array.astype(UpperCAmelCase__ ) )
return {"path": None, "bytes": image_to_bytes(UpperCAmelCase__ )}
def lowerCamelCase ( UpperCAmelCase__ : Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]] ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError("""To support encoding images, please install 'Pillow'.""" )
if objs:
lowercase_ , lowercase_ : Union[str, Any] = first_non_null_value(UpperCAmelCase__ )
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(UpperCAmelCase__ , np.ndarray ):
lowercase_ : Union[str, Any] = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
elif isinstance(UpperCAmelCase__ , PIL.Image.Image ):
lowercase_ : List[str] = no_op_if_value_is_null(UpperCAmelCase__ )
return [obj_to_image_dict_func(UpperCAmelCase__ ) for obj in objs]
else:
return objs
else:
return objs
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __magic_name__ :
def __init__( self : Any , lowercase_ : Dict , lowercase_ : Dict=13 , lowercase_ : int=30 , lowercase_ : List[Any]=2 , lowercase_ : List[str]=3 , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : Tuple=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : Tuple=4 , lowercase_ : Optional[int]=37 , lowercase_ : int="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=10 , lowercase_ : Optional[Any]=0.02 , lowercase_ : List[Any]=3 , lowercase_ : str=0.6 , lowercase_ : List[str]=None , ):
lowercase_ : Union[str, Any] = parent
lowercase_ : Any = batch_size
lowercase_ : Optional[Any] = image_size
lowercase_ : str = patch_size
lowercase_ : Union[str, Any] = num_channels
lowercase_ : List[str] = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : List[str] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : List[str] = hidden_act
lowercase_ : Tuple = hidden_dropout_prob
lowercase_ : List[str] = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = type_sequence_label_size
lowercase_ : int = initializer_range
lowercase_ : List[Any] = mask_ratio
lowercase_ : Optional[int] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
lowercase_ : Union[str, Any] = (image_size // patch_size) ** 2
lowercase_ : str = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
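        # e.g. with the defaults above (image_size=30, patch_size=2, mask_ratio=0.6):
        # num_patches = (30 // 2) ** 2 = 225 and seq_length = ceil(0.4 * (225 + 1)) = 91,
        # i.e. roughly the [CLS] token plus the patches left visible after random masking.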
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Union[str, Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : List[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : str ):
lowercase_ : Optional[Any] = ViTMAEModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[str] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : int ):
lowercase_ : List[str] = ViTMAEForPreTraining(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Union[str, Any] = model(lowercase_ )
lowercase_ : Tuple = (self.image_size // self.patch_size) ** 2
lowercase_ : Tuple = self.patch_size**2 * self.num_channels
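        # With the tester defaults (image_size=30, patch_size=2, num_channels=3) this gives
        # (30 // 2) ** 2 = 225 patches and 2 ** 2 * 3 = 12 values per patch, so the
        # reconstruction logits are expected to have shape (batch_size, 225, 12).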
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
lowercase_ : Any = 1
lowercase_ : int = ViTMAEForPreTraining(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ : Optional[int] = model(lowercase_ )
lowercase_ : List[str] = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = config_and_inputs
lowercase_ : Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
UpperCamelCase__ = {'''feature-extraction''': ViTMAEModel} if is_torch_available() else {}
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = ViTMAEModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[int] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Tuple = model_class(lowercase_ )
lowercase_ : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Dict = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : str ):
# make masks reproducible
np.random.seed(2 )
lowercase_ : List[str] = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
lowercase_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
lowercase_ : Union[str, Any] = torch.from_numpy(lowercase_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
lowercase_ : Optional[Any] = pt_noise
super().check_pt_tf_models(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Union[str, Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs[0].cpu().numpy()
lowercase_ : int = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowercase_ )
lowercase_ : Any = model_class.from_pretrained(lowercase_ )
model.to(lowercase_ )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
lowercase_ : Any = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
# Make sure we don't have nans
lowercase_ : int = after_outputs[0].cpu().numpy()
lowercase_ : Optional[int] = 0
lowercase_ : List[Any] = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowercase_ , 1E-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def SCREAMING_SNAKE_CASE_ ( self : str ):
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : str = ViTMAEModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
lowercase_ : Dict = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(lowercase_ )
lowercase_ : Tuple = self.default_image_processor
lowercase_ : int = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
lowercase_ : Union[str, Any] = ViTMAEConfig()
lowercase_ : Union[str, Any] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
lowercase_ : Union[str, Any] = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
lowercase_ : Any = model(**lowercase_ , noise=torch.from_numpy(lowercase_ ).to(device=lowercase_ ) )
# verify the logits
lowercase_ : List[Any] = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : str = torch.tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice.to(lowercase_ ) , atol=1E-4 ) )
| 30
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
| 30
| 1
|
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __magic_name__ ( nn.Module):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1
UpperCamelCase__ = 1
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Union[str, Any] = []
lowercase_ : Union[str, Any] = []
for i in range(self.num_layers ):
lowercase_ : Dict = self.in_channels if i == 0 else self.out_channels
lowercase_ : Tuple = FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
lowercase_ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
lowercase_ : Union[str, Any] = resnets
lowercase_ : Any = attentions
if self.add_downsample:
lowercase_ : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Dict , lowercase_ : Optional[Any]=True ):
lowercase_ : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
lowercase_ : Optional[Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
lowercase_ : Tuple = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
lowercase_ : Dict = self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
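# Each down block returns `output_states`, the hidden states produced at every layer; a
# UNet assembled from these blocks collects them and later feeds them back, in reverse
# order, as the `res_hidden_states_tuple` skip connections consumed by the up blocks below.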
class __magic_name__ ( nn.Module):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1
UpperCamelCase__ = True
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = []
for i in range(self.num_layers ):
lowercase_ : Union[str, Any] = self.in_channels if i == 0 else self.out_channels
lowercase_ : str = FlaxResnetBlockaD(
in_channels=lowercase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
lowercase_ : str = resnets
if self.add_downsample:
lowercase_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Dict , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=True ):
lowercase_ : int = ()
for resnet in self.resnets:
lowercase_ : str = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
output_states += (hidden_states,)
if self.add_downsample:
lowercase_ : List[str] = self.downsamplers_a(lowercase_ )
output_states += (hidden_states,)
return hidden_states, output_states
class __magic_name__ ( nn.Module):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1
UpperCamelCase__ = 1
UpperCamelCase__ = True
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Tuple = []
lowercase_ : str = []
for i in range(self.num_layers ):
lowercase_ : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase_ : Union[str, Any] = self.prev_output_channel if i == 0 else self.out_channels
lowercase_ : int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
lowercase_ : str = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
lowercase_ : List[Any] = resnets
lowercase_ : Dict = attentions
if self.add_upsample:
lowercase_ : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Any=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
lowercase_ : Dict = res_hidden_states_tuple[-1]
lowercase_ : List[Any] = res_hidden_states_tuple[:-1]
lowercase_ : int = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase_ : Tuple = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
lowercase_ : Union[str, Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
lowercase_ : Union[str, Any] = self.upsamplers_a(lowercase_ )
return hidden_states
class __magic_name__ ( nn.Module):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1
UpperCamelCase__ = True
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = []
for i in range(self.num_layers ):
lowercase_ : List[str] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
lowercase_ : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
lowercase_ : Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
lowercase_ : Dict = resnets
if self.add_upsample:
lowercase_ : List[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Dict=True ):
for resnet in self.resnets:
# pop res hidden states
lowercase_ : str = res_hidden_states_tuple[-1]
lowercase_ : Any = res_hidden_states_tuple[:-1]
lowercase_ : Union[str, Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
lowercase_ : Union[str, Any] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
if self.add_upsample:
lowercase_ : Optional[Any] = self.upsamplers_a(lowercase_ )
return hidden_states
class __magic_name__ ( nn.Module):
UpperCamelCase__ = 42
UpperCamelCase__ = 0.0
UpperCamelCase__ = 1
UpperCamelCase__ = 1
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
# there is always at least one resnet
lowercase_ : int = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
lowercase_ : str = []
for _ in range(self.num_layers ):
lowercase_ : Optional[int] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_ )
lowercase_ : Optional[Any] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_ )
lowercase_ : str = resnets
lowercase_ : int = attentions
def __call__( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any]=True ):
lowercase_ : Dict = self.resnets[0](lowercase_ , lowercase_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
lowercase_ : Optional[Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_ )
lowercase_ : Any = resnet(lowercase_ , lowercase_ , deterministic=lowercase_ )
return hidden_states
| 30
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 1
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( ode_func : Callable , ya : float , xa : float , x_end : float , step_size : float ) -> np.array:
    # Heun's (modified Euler) method: an explicit Euler predictor followed by a
    # trapezoidal corrector. Parameter and local names are spelled out so the body
    # is self-consistent and runnable.
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        # predictor: plain explicit Euler step
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        # corrector: average the slopes at the two ends of the interval
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
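# A minimal usage sketch (assuming the signature above): integrating dy/dx = y from
# x = 0 to x = 1 with step_size = 0.1 gives y[-1] ~= 2.714, close to the exact value e:
#   lowerCamelCase(lambda x, y: y, 1.0, 0.0, 1.0, 0.1)[-1]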
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 1
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase : Dict = True
except ImportError:
_lowercase : Any = False
_lowercase : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowerCamelCase ( UpperCAmelCase__ : Namespace ) -> List[str]:
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class __magic_name__ ( _UpperCAmelCase):
@staticmethod
def SCREAMING_SNAKE_CASE_ ( lowercase_ : ArgumentParser ):
lowercase_ : int = parser.add_parser("""add-new-model""" )
add_new_model_parser.add_argument("""--testing""" , action="""store_true""" , help="""If in testing mode.""" )
add_new_model_parser.add_argument("""--testing_file""" , type=lowercase_ , help="""Configuration file on which to run.""" )
add_new_model_parser.add_argument(
"""--path""" , type=lowercase_ , help="""Path to cookiecutter. Should only be used for testing purposes.""" )
add_new_model_parser.set_defaults(func=lowercase_ )
def __init__( self : Optional[int] , lowercase_ : bool , lowercase_ : str , lowercase_ : Union[str, Any]=None , *lowercase_ : Dict ):
lowercase_ : Union[str, Any] = testing
lowercase_ : List[Any] = testing_file
lowercase_ : int = path
def SCREAMING_SNAKE_CASE_ ( self : Any ):
warnings.warn(
"""The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. """
"""It is not actively maintained anymore, so might give a result that won't pass all tests and quality """
"""checks, you should use `transformers-cli add-new-model-like` instead.""" )
if not _has_cookiecutter:
raise ImportError(
"""Model creation dependencies are required to use the `add_new_model` command. Install them by running """
"""the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n""" )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
lowercase_ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" == directory[:22]]
if len(lowercase_ ) > 0:
raise ValueError(
"""Several directories starting with `cookiecutter-template-` in current working directory. """
"""Please clean your directory by removing all folders starting with `cookiecutter-template-` or """
"""change your working directory.""" )
lowercase_ : str = (
Path(lowercase_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
lowercase_ : List[Any] = path_to_transformer_root / """templates""" / """adding_a_new_model"""
# Execute cookiecutter
if not self._testing:
cookiecutter(str(lowercase_ ) )
else:
with open(self._testing_file , """r""" ) as configuration_file:
lowercase_ : Union[str, Any] = json.load(lowercase_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) , no_input=lowercase_ , extra_context=lowercase_ , )
lowercase_ : List[Any] = [directory for directory in os.listdir() if """cookiecutter-template-""" in directory[:22]][0]
# Retrieve configuration
with open(directory + """/configuration.json""" , """r""" ) as configuration_file:
lowercase_ : Optional[int] = json.load(lowercase_ )
lowercase_ : List[Any] = configuration["""lowercase_modelname"""]
lowercase_ : List[Any] = configuration["""generate_tensorflow_pytorch_and_flax"""]
os.remove(f'''{directory}/configuration.json''' )
lowercase_ : Any = """PyTorch""" in generate_tensorflow_pytorch_and_flax
lowercase_ : Tuple = """TensorFlow""" in generate_tensorflow_pytorch_and_flax
lowercase_ : Any = """Flax""" in generate_tensorflow_pytorch_and_flax
lowercase_ : List[str] = f'''{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'''
os.makedirs(lowercase_ , exist_ok=lowercase_ )
os.makedirs(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}''' , exist_ok=lowercase_ )
# Tests require submodules as they have parent imports
with open(f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py''' , """w""" ):
pass
shutil.move(
f'''{directory}/__init__.py''' , f'''{model_dir}/__init__.py''' , )
shutil.move(
f'''{directory}/configuration_{lowercase_model_name}.py''' , f'''{model_dir}/configuration_{lowercase_model_name}.py''' , )
def remove_copy_lines(lowercase_ : int ):
with open(lowercase_ , """r""" ) as f:
lowercase_ : Union[str, Any] = f.readlines()
with open(lowercase_ , """w""" ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(lowercase_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_{lowercase_model_name}.py''' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_tf_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_tf_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_tf_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_tf_{lowercase_model_name}.py''' )
if output_flax:
if not self._testing:
remove_copy_lines(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/modeling_flax_{lowercase_model_name}.py''' , f'''{model_dir}/modeling_flax_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' , f'''{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py''' , )
else:
os.remove(f'''{directory}/modeling_flax_{lowercase_model_name}.py''' )
os.remove(f'''{directory}/test_modeling_flax_{lowercase_model_name}.py''' )
shutil.move(
f'''{directory}/{lowercase_model_name}.md''' , f'''{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md''' , )
shutil.move(
f'''{directory}/tokenization_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}.py''' , )
shutil.move(
f'''{directory}/tokenization_fast_{lowercase_model_name}.py''' , f'''{model_dir}/tokenization_{lowercase_model_name}_fast.py''' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(lowercase_ : str , lowercase_ : str , lowercase_ : List[str] ):
# Create temp file
lowercase_ , lowercase_ : Any = mkstemp()
lowercase_ : List[str] = False
with fdopen(lowercase_ , """w""" ) as new_file:
with open(lowercase_ ) as old_file:
for line in old_file:
new_file.write(lowercase_ )
if line_to_copy_below in line:
lowercase_ : int = True
for line_to_copy in lines_to_copy:
new_file.write(lowercase_ )
if not line_found:
raise ValueError(f'''Line {line_to_copy_below} was not found in file.''' )
# Copy the file permissions from the old file to the new file
copymode(lowercase_ , lowercase_ )
# Remove original file
remove(lowercase_ )
# Move new file
move(lowercase_ , lowercase_ )
def skip_units(lowercase_ : str ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
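    # Illustrative note (added for clarity): ``replace_in_files`` below expects the
    # generated ``to_replace_<model>.py`` file to contain marker blocks roughly like the
    # following (the path and the anchor line are made-up examples):
    #
    #     # To replace in: "src/transformers/models/auto/configuration_auto.py"
    #     # Below: "("albert", "AlbertConfig"),"
    #     # Replace with:
    #     ("<lowercase_model_name>", "<ModelName>Config"),
    #     # End.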
def replace_in_files(lowercase_ : int ):
with open(lowercase_ ) as datafile:
lowercase_ : Optional[int] = []
lowercase_ : List[Any] = False
lowercase_ : Dict = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
lowercase_ : Optional[Any] = line.split("""\"""" )[1]
lowercase_ : Optional[int] = skip_units(lowercase_ )
elif "# Below: " in line and "##" not in line:
lowercase_ : List[str] = line.split("""\"""" )[1]
lowercase_ : Union[str, Any] = skip_units(lowercase_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : int = []
elif "# Replace with" in line and "##" not in line:
lowercase_ : Optional[int] = []
elif "##" not in line:
lines_to_copy.append(lowercase_ )
remove(lowercase_ )
replace_in_files(f'''{directory}/to_replace_{lowercase_model_name}.py''' )
os.rmdir(lowercase_ )
| 30
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
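if __name__ == "__main__":
    # Illustrative standalone sketch (added for clarity, not part of the original module):
    # it rebuilds the same low-pass biquad coefficients as the first factory above and
    # applies them with a direct-form I difference equation, so it does not depend on the
    # IIRFilter class. The 1 kHz cutoff and the 440 Hz test tone are arbitrary demo values.
    frequency, samplerate, q_factor = 1_000.0, 48_000.0, 1 / sqrt(2)
    omega = tau * frequency / samplerate
    alpha = sin(omega) / (2 * q_factor)
    b_coeffs = [(1 - cos(omega)) / 2, 1 - cos(omega), (1 - cos(omega)) / 2]
    a_coeffs = [1 + alpha, -2 * cos(omega), 1 - alpha]
    x_hist, y_hist = [0.0, 0.0], [0.0, 0.0]
    filtered = []
    for n in range(480):
        x0 = sin(tau * 440 * n / samplerate)  # 440 Hz test tone, 10 ms worth of samples
        y0 = (
            b_coeffs[0] * x0
            + b_coeffs[1] * x_hist[0]
            + b_coeffs[2] * x_hist[1]
            - a_coeffs[1] * y_hist[0]
            - a_coeffs[2] * y_hist[1]
        ) / a_coeffs[0]
        x_hist, y_hist = [x0, x_hist[0]], [y0, y_hist[0]]
        filtered.append(y0)
    print(f"filtered {len(filtered)} samples; last value {filtered[-1]:.6f}")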
| 30
| 1
|
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Union[str, Any] , *lowercase_ : Optional[Any] , **lowercase_ : Tuple ):
super().__init__(*lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : int ):
lowercase_ : int = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(lowercase_ )
lowercase_ : Any = self.values[key]
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return (
sum(self.charge_factor - len(lowercase_ ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(lowercase_ ) == 0
):
return key
return super()._collision_resolution(lowercase_ , lowercase_ )
| 30
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
            lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
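if __name__ == "__main__":
    # Illustrative usage sketch (added for clarity): this builder is what backs the public
    # ``Dataset.from_spark`` entry point. The local Spark session and the toy DataFrame
    # below are assumptions made purely for demonstration.
    from pyspark.sql import SparkSession

    from datasets import Dataset
    spark_session = SparkSession.builder.master("local[2]").appName("demo").getOrCreate()
    demo_df = spark_session.createDataFrame([("hello",), ("world",)], schema="text string")
    demo_dataset = Dataset.from_spark(demo_df)
    print(demo_dataset)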
| 30
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[int] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_lowercase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_lowercase : Optional[Any] = TypeVar("_T")
class __magic_name__ ( Generic[_T]):
def __init__( self : str , lowercase_ : Iterable[_T] | None = None ):
lowercase_ : list[_T] = list(iterable or [] )
lowercase_ : list[_T] = []
def __len__( self : Optional[int] ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[int] ):
return f'''Queue({tuple(self._stacka[::-1] + self._stacka )})'''
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : _T ):
self._stacka.append(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Tuple = self._stacka.pop
lowercase_ : Tuple = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("""Queue is empty""" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
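    # Illustrative sketch (added for clarity): the class above amortises dequeue by
    # shuttling items between two stacks; the same idea with two plain lists.
    inbox = [10, 20, 30]  # items in enqueue order
    outbox = []
    if not outbox:  # refill the out-stack only when it is empty
        while inbox:
            outbox.append(inbox.pop())
    print(outbox.pop())  # 10 comes out first, so FIFO order is preserved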
| 30
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
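def _vigenere_demo() -> None:
    # Illustrative standalone sketch (added for clarity): a short worked example of the
    # Vigenère shift implemented above, using explicit names instead of the ones in this
    # file. Calling it prints the classic HELLO/KEY -> RIJVS example.
    alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    message, key = "HELLO", "KEY"
    encrypted = []
    for index, symbol in enumerate(message):
        shift = alphabet.find(key[index % len(key)])
        encrypted.append(alphabet[(alphabet.find(symbol) + shift) % len(alphabet)])
    print("".join(encrypted))  # prints RIJVS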
if __name__ == "__main__":
main()
| 30
| 1
|
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''', _UpperCAmelCase, )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RobertaConfig
UpperCamelCase__ = '''roberta'''
def __init__( self : List[str] , lowercase_ : Optional[int] ):
super().__init__(lowercase_ )
lowercase_ : int = RobertaEmbeddings(lowercase_ )
self.init_weights()
@add_start_docstrings(
'''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
also takes care of multi-layer training. ''', _UpperCAmelCase, )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RobertaConfig
UpperCamelCase__ = '''roberta'''
def __init__( self : List[str] , lowercase_ : int ):
super().__init__(lowercase_ )
lowercase_ : str = config.num_labels
lowercase_ : List[str] = config.num_hidden_layers
lowercase_ : Dict = DeeRobertaModel(lowercase_ )
lowercase_ : Union[str, Any] = nn.Dropout(config.hidden_dropout_prob )
lowercase_ : Optional[Any] = nn.Linear(config.hidden_size , self.config.num_labels )
@add_start_docstrings_to_model_forward(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Dict=None , lowercase_ : int=None , lowercase_ : Optional[int]=None , lowercase_ : List[str]=None , lowercase_ : Tuple=None , lowercase_ : int=None , lowercase_ : List[str]=None , lowercase_ : List[str]=-1 , lowercase_ : Tuple=False , ):
lowercase_ : int = self.num_layers
try:
lowercase_ : List[str] = self.roberta(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , position_ids=lowercase_ , head_mask=lowercase_ , inputs_embeds=lowercase_ , )
lowercase_ : Optional[Any] = outputs[1]
lowercase_ : List[str] = self.dropout(lowercase_ )
lowercase_ : str = self.classifier(lowercase_ )
lowercase_ : Optional[int] = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
lowercase_ : Tuple = e.message
lowercase_ : int = e.exit_layer
lowercase_ : Dict = outputs[0]
if not self.training:
lowercase_ : Optional[Any] = entropy(lowercase_ )
lowercase_ : str = []
lowercase_ : List[str] = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
lowercase_ : Optional[int] = MSELoss()
lowercase_ : Optional[Any] = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
lowercase_ : int = []
for highway_exit in outputs[-1]:
lowercase_ : Dict = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
lowercase_ : List[Any] = MSELoss()
lowercase_ : Tuple = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Any = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(lowercase_ )
if train_highway:
lowercase_ : Any = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
lowercase_ : Dict = (loss,) + outputs
if not self.training:
lowercase_ : Dict = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
lowercase_ : Union[str, Any] = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 30
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 1
|
'''simple docstring'''
import random
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : bool = False ) -> dict:
lowercase_ : dict = {i: [] for i in range(UpperCAmelCase__ )}
    # if probability is greater than or equal to 1, then generate a complete graph
if probability >= 1:
return complete_graph(UpperCAmelCase__ )
    # if probability is less than or equal to 0, then return a graph without edges
if probability <= 0:
return graph
    # for each pair of nodes i < j, add an edge from i to j
    # if the randomly generated number is smaller than the given probability
for i in range(UpperCAmelCase__ ):
for j in range(i + 1 , UpperCAmelCase__ ):
if random.random() < probability:
graph[i].append(UpperCAmelCase__ )
if not directed:
                # if the graph is undirected, also add the reverse edge from j to i
graph[j].append(UpperCAmelCase__ )
return graph
def lowerCamelCase ( UpperCAmelCase__ : int ) -> dict:
return {
i: [j for j in range(UpperCAmelCase__ ) if i != j] for i in range(UpperCAmelCase__ )
}
if __name__ == "__main__":
import doctest
doctest.testmod()
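    # Illustrative note (added for clarity): with n nodes and edge probability p, the
    # generator above yields about p * n * (n - 1) / 2 undirected edges on average.
    demo_nodes, demo_probability = 100, 0.3
    print(f"expected edges: {demo_probability * demo_nodes * (demo_nodes - 1) / 2:.0f}")  # ~1485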
| 30
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
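# Illustrative usage note (added for clarity; exact method names depend on the datasets
# version): a task template like the one above is normally applied via something like
# ``dataset.prepare_for_task("image-classification")``, which runs the column-alignment
# step implemented above to cast the label column to a ClassLabel and expose the standard
# "image" / "labels" pair declared in the column mapping.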
| 30
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''perceiver'''
def __init__( self : str , lowercase_ : Union[str, Any]=256 , lowercase_ : Dict=1280 , lowercase_ : List[str]=768 , lowercase_ : int=1 , lowercase_ : Any=26 , lowercase_ : Tuple=8 , lowercase_ : Any=8 , lowercase_ : int=None , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]="kv" , lowercase_ : Optional[int]=1 , lowercase_ : str=1 , lowercase_ : Tuple="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Dict=0.02 , lowercase_ : Optional[int]=1E-12 , lowercase_ : List[Any]=True , lowercase_ : List[str]=262 , lowercase_ : Tuple=2048 , lowercase_ : int=56 , lowercase_ : int=[368, 496] , lowercase_ : Tuple=16 , lowercase_ : List[Any]=1920 , lowercase_ : Optional[Any]=16 , lowercase_ : Tuple=[1, 16, 224, 224] , **lowercase_ : Tuple , ):
super().__init__(**lowercase_ )
lowercase_ : Any = num_latents
lowercase_ : Dict = d_latents
lowercase_ : Tuple = d_model
lowercase_ : List[Any] = num_blocks
lowercase_ : Optional[Any] = num_self_attends_per_block
lowercase_ : Tuple = num_self_attention_heads
lowercase_ : Any = num_cross_attention_heads
lowercase_ : Union[str, Any] = qk_channels
lowercase_ : Union[str, Any] = v_channels
lowercase_ : Optional[Any] = cross_attention_shape_for_attention
lowercase_ : Any = self_attention_widening_factor
lowercase_ : int = cross_attention_widening_factor
lowercase_ : Optional[int] = hidden_act
lowercase_ : Any = attention_probs_dropout_prob
lowercase_ : int = initializer_range
lowercase_ : int = layer_norm_eps
lowercase_ : Optional[int] = use_query_residual
# masked language modeling attributes
lowercase_ : Dict = vocab_size
lowercase_ : Dict = max_position_embeddings
# image classification attributes
lowercase_ : Any = image_size
# flow attributes
lowercase_ : str = train_size
# multimodal autoencoding attributes
lowercase_ : Dict = num_frames
lowercase_ : Optional[Any] = audio_samples_per_frame
lowercase_ : Optional[Any] = samples_per_patch
lowercase_ : int = output_shape
class __magic_name__ ( _UpperCAmelCase):
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
if self.task == "multiple-choice":
lowercase_ : Dict = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
lowercase_ : int = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""inputs""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return 1E-4
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : int = -1 , lowercase_ : bool = False , lowercase_ : Optional[TensorType] = None , lowercase_ : int = 3 , lowercase_ : int = 40 , lowercase_ : int = 40 , ):
# copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
if isinstance(lowercase_ , lowercase_ ):
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ : Dict = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
lowercase_ : int = preprocessor.num_special_tokens_to_add(lowercase_ )
lowercase_ : Optional[Any] = compute_effective_axis_dimension(
lowercase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowercase_ )
# Generate dummy inputs according to compute batch and sequence
lowercase_ : Optional[int] = [""" """.join(["""a"""] ) * seq_length] * batch_size
lowercase_ : List[Any] = dict(preprocessor(lowercase_ , return_tensors=lowercase_ ) )
lowercase_ : List[Any] = inputs.pop("""input_ids""" )
return inputs
elif isinstance(lowercase_ , lowercase_ ) and preprocessor.model_input_names[0] == "pixel_values":
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
lowercase_ : int = compute_effective_axis_dimension(lowercase_ , fixed_dimension=OnnxConfig.default_fixed_batch )
lowercase_ : Union[str, Any] = self._generate_dummy_images(lowercase_ , lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = dict(preprocessor(images=lowercase_ , return_tensors=lowercase_ ) )
lowercase_ : str = inputs.pop("""pixel_values""" )
return inputs
else:
raise ValueError(
"""Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.""" )
| 30
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
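# Illustrative invocation sketch (added for clarity; the script file name is an assumption):
#
#   python retrieve_class_images.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg_images --num_class_images 200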
| 30
| 1
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_lowercase : Tuple = logging.get_logger(__name__)
@dataclass
class __magic_name__ :
UpperCamelCase__ = field(metadata={'''help''': '''The name of the task to train on: ''' + ''', '''.join(glue_processors.keys())})
UpperCamelCase__ = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''})
UpperCamelCase__ = field(
default=128, metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
}, )
UpperCamelCase__ = field(
default=_UpperCAmelCase, metadata={'''help''': '''Overwrite the cached training and evaluation sets'''})
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = self.task_name.lower()
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''train'''
UpperCamelCase__ = '''dev'''
UpperCamelCase__ = '''test'''
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
UpperCamelCase__ = 42
def __init__( self : Union[str, Any] , lowercase_ : GlueDataTrainingArguments , lowercase_ : PreTrainedTokenizerBase , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Split] = Split.train , lowercase_ : Optional[str] = None , ):
warnings.warn(
"""This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets """
"""library. You can have a look at this example script for pointers: """
"""https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py""" , lowercase_ , )
lowercase_ : Optional[Any] = args
lowercase_ : Union[str, Any] = glue_processors[args.task_name]()
lowercase_ : Dict = glue_output_modes[args.task_name]
if isinstance(lowercase_ , lowercase_ ):
try:
lowercase_ : str = Split[mode]
except KeyError:
raise KeyError("""mode is not a valid split name""" )
# Load data features from cache or dataset file
lowercase_ : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
lowercase_ : int = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
lowercase_ , lowercase_ : Tuple = label_list[2], label_list[1]
lowercase_ : Union[str, Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lowercase_ : Union[str, Any] = cached_features_file + """.lock"""
with FileLock(lowercase_ ):
if os.path.exists(lowercase_ ) and not args.overwrite_cache:
lowercase_ : Optional[Any] = time.time()
lowercase_ : str = torch.load(lowercase_ )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
lowercase_ : Optional[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
lowercase_ : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
lowercase_ : Dict = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
lowercase_ : Optional[int] = examples[:limit_length]
lowercase_ : Optional[int] = glue_convert_examples_to_features(
lowercase_ , lowercase_ , max_length=args.max_seq_length , label_list=lowercase_ , output_mode=self.output_mode , )
lowercase_ : Any = time.time()
torch.save(self.features , lowercase_ )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , lowercase_ : Dict ):
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.label_list
| 30
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None:
if start is None:
lowercase_ : Any = 0
if end is None:
lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1
if start >= end:
return
lowercase_ : Optional[int] = (start + end) // 2
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end]
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 1
|
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def lowerCamelCase ( UpperCAmelCase__ : int ) -> bool:
lowercase_ : int = int(number**0.5 )
return number == sq * sq
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : int ) -> tuple[int, int]:
lowercase_ : int = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowercase_ : int = x_den * y_den * z_den
lowercase_ : int = gcd(UpperCAmelCase__ , UpperCAmelCase__ )
top //= hcf
bottom //= hcf
return top, bottom
def lowerCamelCase ( UpperCAmelCase__ : int = 35 ) -> int:
lowercase_ : set = set()
lowercase_ : int
lowercase_ : Fraction = Fraction(0 )
lowercase_ : tuple[int, int]
for x_num in range(1 , order + 1 ):
for x_den in range(x_num + 1 , order + 1 ):
for y_num in range(1 , order + 1 ):
for y_den in range(y_num + 1 , order + 1 ):
# n=1
lowercase_ : str = x_num * y_den + x_den * y_num
lowercase_ : Optional[int] = x_den * y_den
lowercase_ : Optional[int] = gcd(UpperCAmelCase__ , UpperCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[str] = add_three(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
unique_s.add(UpperCAmelCase__ )
# n=2
lowercase_ : List[str] = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowercase_ : Optional[int] = x_den * x_den * y_den * y_den
if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = int(sqrt(UpperCAmelCase__ ) )
lowercase_ : Optional[Any] = int(sqrt(UpperCAmelCase__ ) )
lowercase_ : List[Any] = gcd(UpperCAmelCase__ , UpperCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[Any] = add_three(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
unique_s.add(UpperCAmelCase__ )
# n=-1
lowercase_ : List[Any] = x_num * y_num
lowercase_ : Any = x_den * y_num + x_num * y_den
lowercase_ : Any = gcd(UpperCAmelCase__ , UpperCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[Any] = add_three(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
unique_s.add(UpperCAmelCase__ )
                    # n=-2
lowercase_ : str = x_num * x_num * y_num * y_num
lowercase_ : Tuple = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(UpperCAmelCase__ ) and is_sq(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = int(sqrt(UpperCAmelCase__ ) )
lowercase_ : int = int(sqrt(UpperCAmelCase__ ) )
lowercase_ : Optional[int] = gcd(UpperCAmelCase__ , UpperCAmelCase__ )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowercase_ : List[str] = add_three(
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
unique_s.add(UpperCAmelCase__ )
for num, den in unique_s:
total += Fraction(UpperCAmelCase__ , UpperCAmelCase__ )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 30
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 1
|
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = (DDIMParallelScheduler,)
UpperCamelCase__ = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def SCREAMING_SNAKE_CASE_ ( self : Any , **lowercase_ : str ):
lowercase_ : Tuple = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""clip_sample""": True,
}
config.update(**lowercase_ )
return config
def SCREAMING_SNAKE_CASE_ ( self : Dict , **lowercase_ : List[str] ):
lowercase_ : Optional[int] = self.scheduler_classes[0]
lowercase_ : List[Any] = self.get_scheduler_config(**lowercase_ )
lowercase_ : Any = scheduler_class(**lowercase_ )
lowercase_ , lowercase_ : Optional[int] = 10, 0.0
lowercase_ : Tuple = self.dummy_model()
lowercase_ : Optional[int] = self.dummy_sample_deter
scheduler.set_timesteps(lowercase_ )
for t in scheduler.timesteps:
lowercase_ : Any = model(lowercase_ , lowercase_ )
lowercase_ : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_ , lowercase_ ).prev_sample
return sample
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=lowercase_ )
lowercase_ : Dict = self.scheduler_classes[0]
lowercase_ : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
lowercase_ : int = scheduler_class(**lowercase_ )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
self.check_over_configs(thresholding=lowercase_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=lowercase_ , prediction_type=lowercase_ , sample_max_value=lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=lowercase_ , num_inference_steps=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=lowercase_ , eta=lowercase_ )
    def SCREAMING_SNAKE_CASE_ ( self : Dict ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps )
        model = self.dummy_model()
        # Three perturbed copies of the deterministic sample, processed as one batched call.
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1
        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c] , dim=0 )
        timesteps = torch.arange(num_inference_steps )[0:3, None].repeat(1 , per_sample_batch )
        residual = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , eta )
        result_sum = torch.sum(torch.abs(pred_prev_sample ) )
        result_mean = torch.mean(torch.abs(pred_prev_sample ) )
        assert abs(result_sum.item() - 11_47.79_04 ) < 1E-2
        assert abs(result_mean.item() - 0.49_82 ) < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_72.00_67 ) < 1E-2
        assert abs(result_mean.item() - 0.22_39_67 ) < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 52.53_02 ) < 1E-2
        assert abs(result_mean.item() - 0.06_84 ) < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self : int ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.82_95 ) < 1E-2
        assert abs(result_mean.item() - 0.19_51 ) < 1E-3
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01 )
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_49.07_84 ) < 1E-2
        assert abs(result_mean.item() - 0.19_41 ) < 1E-3
| 30
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''Salesforce/blip-image-captioning-base'''
UpperCamelCase__ = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
UpperCamelCase__ = '''image_captioner'''
UpperCamelCase__ = AutoModelForVisionaSeq
UpperCamelCase__ = ['''image''']
UpperCamelCase__ = ['''text''']
def __init__( self : Dict , *lowercase_ : int , **lowercase_ : str ):
requires_backends(self , ["""vision"""] )
super().__init__(*lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : "Image" ):
return self.pre_processor(images=lowercase_ , return_tensors="""pt""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict ):
return self.model.generate(**lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] ):
return self.pre_processor.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )[0].strip()
| 30
|
'''simple docstring'''
import unittest
import numpy as np
def schur_complement( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
    # Schur complement of block A in the block matrix [[A, B], [B.T, C]]: C - B.T @ A^-1 @ B
    shape_a = np.shape(mat_a )
    shape_b = np.shape(mat_b )
    shape_c = np.shape(mat_c )
    if shape_a[0] != shape_b[0]:
        msg = (
            """Expected the same number of rows for A and B. """
            F'''Instead found A of size {shape_a} and B of size {shape_b}'''
        )
        raise ValueError(msg )
    if shape_b[1] != shape_c[1]:
        msg = (
            """Expected the same number of columns for B and C. """
            F'''Instead found B of size {shape_b} and C of size {shape_c}'''
        )
        raise ValueError(msg )
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a )
        except np.linalg.LinAlgError:
            raise ValueError(
                """Input matrix A is not invertible. Cannot compute Schur complement.""" )
    return mat_c - mat_b.T @ a_inv @ mat_b
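# Illustrative check (a minimal sketch, not part of the original test suite): for the block
# matrix M = [[A, B], [B.T, C]], the determinant factorises as
# det(M) = det(A) * det(C - B.T @ A^-1 @ B), which is exactly what the unit tests below verify.
# For example, with A = [[1, 0], [0, 1]], B = [[1], [0]] and C = [[2]], the Schur complement
# is [[2 - 1]] = [[1]] and det(M) = det(A) * 1 = 1.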
class __magic_name__ ( unittest.TestCase):
    def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        s = schur_complement(a , b , c )
        input_matrix = np.block([[a, b], [b.T, c]] )
        det_x = np.linalg.det(input_matrix )
        det_a = np.linalg.det(a )
        det_s = np.linalg.det(s )
        self.assertAlmostEqual(det_x , det_a * det_s )
    def SCREAMING_SNAKE_CASE_ ( self : str ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1], [6, 3]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
    def SCREAMING_SNAKE_CASE_ ( self : Any ):
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
        b = np.array([[0, 3], [3, 0], [2, 3]] )
        c = np.array([[2, 1, 3], [6, 3, 5]] )
        with self.assertRaises(ValueError ):
            schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Dict , lowercase_ : List[str] , lowercase_ : Union[str, Any]=3 , lowercase_ : Dict=32 , lowercase_ : List[str]=3 , lowercase_ : List[Any]=10 , lowercase_ : List[str]=[8, 16, 32, 64] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : int=True , lowercase_ : str=True , lowercase_ : str="relu" , lowercase_ : Any=3 , lowercase_ : str=None , lowercase_ : Tuple=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : int=1 , ):
lowercase_ : Optional[Any] = parent
lowercase_ : List[str] = batch_size
lowercase_ : Optional[Any] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = embeddings_size
lowercase_ : List[Any] = hidden_sizes
lowercase_ : Tuple = depths
lowercase_ : Dict = is_training
lowercase_ : Optional[Any] = use_labels
lowercase_ : Tuple = hidden_act
lowercase_ : Optional[int] = num_labels
lowercase_ : Union[str, Any] = scope
lowercase_ : List[str] = len(A__ )
lowercase_ : Union[str, Any] = out_features
lowercase_ : Any = out_indices
lowercase_ : int = num_groups
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Union[str, Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : str ):
lowercase_ : Dict = BitModel(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : int = model(A__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : str = BitForImageClassification(A__ )
model.to(A__ )
model.eval()
lowercase_ : Tuple = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Tuple , lowercase_ : Optional[Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = BitBackbone(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Optional[Any] = model(A__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : Dict = None
lowercase_ : str = BitBackbone(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Tuple = model(A__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Tuple = BitModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Union[str, Any] = model_class(A__ )
lowercase_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Optional[int] = [*signature.parameters.keys()]
lowercase_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(config=A__ )
for name, module in model.named_modules():
if isinstance(A__ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : int ):
lowercase_ : Dict = model_class(A__ )
model.to(A__ )
model.eval()
with torch.no_grad():
lowercase_ : List[str] = model(**self._prepare_for_class(A__ , A__ ) )
lowercase_ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(A__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : int = layer_type
lowercase_ : Optional[int] = True
check_hidden_states_output(A__ , A__ , A__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Dict = True
check_hidden_states_output(A__ , A__ , A__ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
pass
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Dict = BitModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def lowerCamelCase ( ) -> str:
lowercase_ : Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Any = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(A__ )
lowercase_ : List[str] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : int = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
lowercase_ : Dict = model(**A__ )
# verify the logits
lowercase_ : str = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
lowercase_ : str = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _lowerCamelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[str] = BitModelTester(self )
| 700
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( data : bytes ) -> bytes:
    # Make sure the supplied data is a bytes-like object
    if not isinstance(data , bytes ):
        msg = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
        raise TypeError(msg )
    binary_stream = """""".join(bin(byte )[2:].zfill(8 ) for byte in data )
    padding_needed = len(binary_stream ) % 6 != 0
    if padding_needed:
        # The padding that will be added later
        padding = b"""=""" * ((6 - len(binary_stream ) % 6) // 2)
        # Append binary_stream with arbitrary binary digits (0's by default) to make its
        # length a multiple of 6.
        binary_stream += "0" * (6 - len(binary_stream ) % 6)
    else:
        padding = b""""""
    # Encode every 6 binary digits to their corresponding Base64 character
    return (
        "".join(
            B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
            for index in range(0 , len(binary_stream ) , 6 ) ).encode()
        + padding
    )
def lowerCamelCase ( encoded_data : str ) -> bytes:
    # Make sure encoded_data is either a string or a bytes-like object
    if not isinstance(encoded_data , bytes ) and not isinstance(encoded_data , str ):
        msg = (
            """argument should be a bytes-like object or ASCII string, """
            F'''not \'{encoded_data.__class__.__name__}\''''
        )
        raise TypeError(msg )
    # In case encoded_data is a bytes-like object, make sure it contains only
    # ASCII characters so we convert it to a string object
    if isinstance(encoded_data , bytes ):
        try:
            encoded_data = encoded_data.decode("""utf-8""" )
        except UnicodeDecodeError:
            raise ValueError("""base64 encoded data should only contain ASCII characters""" )
    padding = encoded_data.count("""=""" )
    # Check if the encoded string contains non base64 characters
    if padding:
        assert all(
            char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
    else:
        assert all(
            char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
    # Check the padding
    assert len(encoded_data ) % 4 == 0 and padding < 3, "Incorrect padding"
    if padding:
        # Remove padding if there is one
        encoded_data = encoded_data[:-padding]
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
    else:
        binary_stream = """""".join(
            bin(B64_CHARSET.index(char ) )[2:].zfill(6 ) for char in encoded_data )
    decoded_data = [
        int(binary_stream[index : index + 8] , 2 )
        for index in range(0 , len(binary_stream ) , 8 )
    ]
    return bytes(decoded_data )
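# Illustrative round trip (a minimal sketch, not part of the original module): encoding
# b"Hello" with the encoder above yields b"SGVsbG8=", and decoding "SGVsbG8=" returns
# b"Hello" again, matching the behaviour of the standard library's base64 module.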
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( matrix : list[list[int]] ) -> int:
    rows = len(matrix )
    columns = len(matrix[0] )
    rank = min(rows , columns )
    for row in range(rank ):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1 , rows ):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row , columns ):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1 , rows ):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows ):
                    matrix[i][row] = matrix[i][rank]
            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
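# Illustrative examples (a minimal sketch, not part of the original module): the rank of
# [[1, 2], [2, 4]] is 1 because the second row is a multiple of the first, while the
# identity matrix [[1, 0], [0, 1]] has full rank 2. Note that the function mutates the
# matrix it is given, so pass a copy if the original values are still needed.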
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def update_custom_js( version : str ) -> None:
    # `_lowercase` above holds the path to docs/source/_static/js/custom.js
    with open(_lowercase , encoding="""utf-8""" , newline="""\n""" ) as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("""const stableVersion =""" ):
        index += 1
    lines[index] = F'''const stableVersion = "v{version}"\n'''
    # Then update the dictionary
    while not lines[index].startswith("""const versionMapping = {""" ):
        index += 1
    # We go until the end
    while not lines[index].startswith("""}""" ):
        index += 1
    # We add the new version at the end
    lines[index - 1] += F'''    "v{version}": "v{version}",\n'''
    with open(_lowercase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
        f.writelines(lines )
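# Illustrative before/after (a minimal sketch; the exact contents of custom.js may differ):
# the script rewrites a line such as `const stableVersion = "v4.27.0"` to the new version
# and appends an entry like `"v4.28.0": "v4.28.0",` to the `versionMapping` dictionary
# just before its closing brace.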
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Optional[int] = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Union[str, Any] = ['ConditionalDetrFeatureExtractor']
_lowercase : Dict = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 0
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class __magic_name__ ( _SCREAMING_SNAKE_CASE):
UpperCamelCase__ = "imagegpt"
UpperCamelCase__ = ["past_key_values"]
UpperCamelCase__ = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Tuple , lowercase_ : Optional[Any]=512 + 1 , lowercase_ : Optional[int]=32 * 32 , lowercase_ : Optional[Any]=512 , lowercase_ : Optional[int]=24 , lowercase_ : int=8 , lowercase_ : Optional[int]=None , lowercase_ : Tuple="quick_gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Union[str, Any]=1E-5 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Tuple=True , lowercase_ : List[str]=True , lowercase_ : List[str]=False , lowercase_ : List[Any]=False , lowercase_ : str=False , **lowercase_ : List[Any] , ):
lowercase_ : Any = vocab_size
lowercase_ : Optional[Any] = n_positions
lowercase_ : Tuple = n_embd
lowercase_ : Any = n_layer
lowercase_ : Tuple = n_head
lowercase_ : Any = n_inner
lowercase_ : Dict = activation_function
lowercase_ : Dict = resid_pdrop
lowercase_ : Dict = embd_pdrop
lowercase_ : Dict = attn_pdrop
lowercase_ : List[str] = layer_norm_epsilon
lowercase_ : str = initializer_range
lowercase_ : Dict = scale_attn_weights
lowercase_ : Union[str, Any] = use_cache
lowercase_ : Union[str, Any] = scale_attn_by_inverse_layer_idx
lowercase_ : Dict = reorder_and_upcast_attn
lowercase_ : Any = tie_word_embeddings
super().__init__(tie_word_embeddings=A_ , **A_ )
class __magic_name__ ( _SCREAMING_SNAKE_CASE):
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Dict , lowercase_ : str = 1 , lowercase_ : Any = -1 , lowercase_ : List[str] = False , lowercase_ : List[str] = None , lowercase_ : Any = 3 , lowercase_ : Dict = 32 , lowercase_ : int = 32 , ):
lowercase_ : Optional[int] = self._generate_dummy_images(A_ , A_ , A_ , A_ )
lowercase_ : Optional[Any] = dict(preprocessor(images=A_ , return_tensors=A_ ) )
return inputs
| 703
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 0
|
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_lowercase : List[str] = '''\
Text data.
Second line of data.'''
_lowercase : Optional[int] = '''file'''
@pytest.fixture(scope="""session""" )
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
lowercase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + '.zstd')
lowercase_ : Union[str, Any] = bytes(_lowercase , """utf-8""" )
with zstd.open(_lowercase , """wb""" ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Any:
with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , """w""" ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize("""compression_format""" , ["""gzip""", """xz""", """zstd"""] )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> Any:
lowercase_ : Any = {'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
lowercase_ : List[Any] = input_paths[compression_format]
lowercase_ : Union[str, Any] = tmp_path / 'cache'
lowercase_ : Union[str, Any] = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase )
lowercase_ : Optional[int] = cached_path(_lowercase , download_config=_lowercase )
with open(_lowercase ) as f:
lowercase_ : List[str] = f.read()
with open(_lowercase ) as f:
lowercase_ : Optional[int] = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("""default_extracted""" , [True, False] )
@pytest.mark.parametrize("""default_cache_dir""" , [True, False] )
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str ) -> Tuple:
lowercase_ : Optional[Any] = 'custom_cache'
lowercase_ : Tuple = 'custom_extracted_dir'
lowercase_ : List[str] = tmp_path / 'custom_extracted_path'
if default_extracted:
lowercase_ : Union[str, Any] = ('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_DIR""" , _lowercase )
monkeypatch.setattr("""datasets.config.EXTRACTED_DATASETS_PATH""" , str(_lowercase ) )
lowercase_ : List[Any] = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
lowercase_ : List[Any] = xz_file
lowercase_ : Any = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase )
)
lowercase_ : Tuple = cached_path(_lowercase , download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> Union[str, Any]:
# absolute path
lowercase_ : Tuple = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
lowercase_ : Dict = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Union[str, Any]:
# absolute path
lowercase_ : List[Any] = str(tmp_path.resolve() / """__missing_file__.txt""" )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
lowercase_ : Tuple = './__missing_file__.txt'
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Tuple:
lowercase_ : List[Any] = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(_lowercase ) as f:
lowercase_ : Optional[Any] = f.read()
assert output_file_content == FILE_CONTENT
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowercase )
def lowerCamelCase ( ) -> Dict:
with pytest.raises(_lowercase ):
cached_path("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowercase )
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> List[str]:
lowercase_ : Dict = tmp_path_factory.mktemp("""data""" ) / 'file.html'
with pytest.raises(_lowercase ):
http_get("""https://huggingface.co""" , temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head("""https://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowercase )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
lowercase_ : str = tmp_path_factory.mktemp("""data""" ) / 'file.html'
with pytest.raises(_lowercase ):
ftp_get("""ftp://huggingface.co""" , temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head("""ftp://huggingface.co""" )
@patch("""datasets.config.HF_DATASETS_OFFLINE""" , _lowercase )
def lowerCamelCase ( UpperCAmelCase__ : Any ) -> List[str]:
lowercase_ : Optional[int] = tmp_path_factory.mktemp("""data""" ) / 'file.html'
with pytest.raises(_lowercase ):
fsspec_get("""s3://huggingface.co""" , temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head("""s3://huggingface.co""" )
| 704
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 30
| 0
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowercase : List[Any] = logging.get_logger(__name__)
def get_resize_output_image_size( input_image : np.ndarray , output_size : Union[int, Iterable[int]] , keep_aspect_ratio : bool , multiple : int ) -> Tuple[int, int]:
    def constraint_to_multiple_of(val : float , multiple : int , min_val : int = 0 , max_val : int = None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
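# Illustrative sizing example (a minimal sketch): for a 480x640 input with output size
# (384, 384), keep_aspect_ratio=True and multiple=32, the height scale (0.8) is closer to 1
# than the width scale (0.6), so both sides are scaled by 0.8 and the helper returns
# (384, 512): the aspect ratio is preserved and both dimensions are multiples of 32.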
class __magic_name__ ( lowercase__):
UpperCamelCase__ = ['''pixel_values''']
def __init__( self : Dict , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = False , lowercase_ : int = 1 , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 255 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , **lowercase_ : List[str] , ):
super().__init__(**__lowerCamelCase )
lowercase_ : Optional[Any] = size if size is not None else {"height": 384, "width": 384}
lowercase_ : Any = get_size_dict(__lowerCamelCase )
lowercase_ : Any = do_resize
lowercase_ : int = size
lowercase_ : str = keep_aspect_ratio
lowercase_ : Tuple = ensure_multiple_of
lowercase_ : Union[str, Any] = resample
lowercase_ : int = do_rescale
lowercase_ : int = rescale_factor
lowercase_ : List[Any] = do_normalize
lowercase_ : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase_ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : bool = False , lowercase_ : int = 1 , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
lowercase_ : str = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
lowercase_ : List[Any] = get_resize_output_image_size(
__lowerCamelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=__lowerCamelCase , multiple=__lowerCamelCase , )
return resize(__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ):
return rescale(__lowerCamelCase , scale=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ):
return normalize(__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase , data_format=__lowerCamelCase , **__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : List[Any] , ):
lowercase_ : str = do_resize if do_resize is not None else self.do_resize
lowercase_ : Tuple = size if size is not None else self.size
lowercase_ : Dict = get_size_dict(__lowerCamelCase )
lowercase_ : Union[str, Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
lowercase_ : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
lowercase_ : int = resample if resample is not None else self.resample
lowercase_ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize
lowercase_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
lowercase_ : Dict = image_std if image_std is not None else self.image_std
lowercase_ : List[Any] = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
lowercase_ : Any = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
lowercase_ : Tuple = [self.resize(image=__lowerCamelCase , size=__lowerCamelCase , resample=__lowerCamelCase ) for image in images]
if do_rescale:
lowercase_ : Dict = [self.rescale(image=__lowerCamelCase , scale=__lowerCamelCase ) for image in images]
if do_normalize:
lowercase_ : Any = [self.normalize(image=__lowerCamelCase , mean=__lowerCamelCase , std=__lowerCamelCase ) for image in images]
lowercase_ : str = [to_channel_dimension_format(__lowerCamelCase , __lowerCamelCase ) for image in images]
lowercase_ : Optional[Any] = {"pixel_values": images}
return BatchFeature(data=__lowerCamelCase , tensor_type=__lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : List[Tuple] = None ):
lowercase_ : List[Any] = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowerCamelCase ) != len(__lowerCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__lowerCamelCase ):
lowercase_ : Any = target_sizes.numpy()
lowercase_ : Optional[Any] = []
for idx in range(len(__lowerCamelCase ) ):
lowercase_ : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__lowerCamelCase )
lowercase_ : Optional[int] = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__lowerCamelCase )
else:
lowercase_ : Union[str, Any] = logits.argmax(dim=1 )
lowercase_ : Any = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
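# Minimal standalone sketch (illustration only, not part of the processor API) of what the
# semantic-segmentation post-processing above does for a single image: bilinear-upsample the
# class logits to the requested target size, then argmax over the class dimension.
def _example_post_process_single_image(logits, target_size=(480, 640)):
    # logits: torch.Tensor of shape (num_classes, height, width)
    resized = torch.nn.functional.interpolate(
        logits.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
    )
    return resized[0].argmax(dim=0)  # (height, width) map of predicted class indices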
| 705
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 0
|
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
_lowercase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase)
class __magic_name__ ( __lowerCamelCase):
def __init__( self : List[str] , **lowercase_ : Any ):
super().__init__(**UpperCAmelCase_ )
requires_backends(self , """vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , lowercase_ : Union[str, List[str], "Image", List["Image"]] , **lowercase_ : Dict ):
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **lowercase_ : Dict ):
lowercase_ : List[str] = {}
if "candidate_labels" in kwargs:
lowercase_ : Dict = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
lowercase_ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any]=None , lowercase_ : Optional[int]="This is a photo of {}." ):
lowercase_ : Union[str, Any] = load_image(UpperCAmelCase_ )
lowercase_ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
lowercase_ : Dict = candidate_labels
lowercase_ : Any = [hypothesis_template.format(UpperCAmelCase_ ) for x in candidate_labels]
lowercase_ : Union[str, Any] = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_ )
lowercase_ : Union[str, Any] = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : List[str] ):
lowercase_ : Union[str, Any] = model_inputs.pop("""candidate_labels""" )
lowercase_ : Optional[Any] = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] , UpperCAmelCase_ ):
lowercase_ : List[str] = text_inputs[0]
else:
# Batching case.
lowercase_ : List[Any] = text_inputs[0][0]
lowercase_ : Dict = self.model(**UpperCAmelCase_ , **UpperCAmelCase_ )
lowercase_ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Tuple ):
lowercase_ : List[Any] = model_outputs.pop("""candidate_labels""" )
lowercase_ : Any = model_outputs['logits'][0]
if self.framework == "pt":
lowercase_ : Union[str, Any] = logits.softmax(dim=-1 ).squeeze(-1 )
lowercase_ : str = probs.tolist()
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowercase_ : Dict = [scores]
elif self.framework == "tf":
lowercase_ : str = stable_softmax(UpperCAmelCase_ , axis=-1 )
lowercase_ : Optional[int] = probs.numpy().tolist()
else:
raise ValueError(f'''Unsupported framework: {self.framework}''' )
lowercase_ : Dict = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_ ) , key=lambda lowercase_ : -x[0] )
]
return result
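# Hedged usage sketch (kept as a comment; the checkpoint name is an assumption, any CLIP-style
# zero-shot image-classification model should behave the same). The pipeline above is normally
# reached through the high-level `pipeline` factory:
#
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(
#       "http://images.cocodataset.org/val2017/000000039769.jpg",
#       candidate_labels=["cat", "dog"],
#   )
#   # -> [{"score": ..., "label": "cat"}, {"score": ..., "label": "dog"}], sorted by score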
| 706
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
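# Hedged usage sketch (comment only; the checkpoint is the one already referenced by the
# docstrings above): typical inference pairs the classification head with an image processor.
#
#   from transformers import AutoImageProcessor, RegNetForImageClassification
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])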
| 30
| 0
|
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __magic_name__ ( UpperCAmelCase_):
UpperCamelCase__ = 42
UpperCamelCase__ = 42
def __init__( self : Any , lowercase_ : List[str] , lowercase_ : Tuple ):
super().__init__()
self.register_modules(unet=_lowercase , scheduler=_lowercase )
@torch.no_grad()
def __call__( self : Union[str, Any] , lowercase_ : Any = 1 , lowercase_ : int = 50 , lowercase_ : str = None , lowercase_ : str = "pil" , lowercase_ : Optional[Any] = True , **lowercase_ : str , ):
lowercase_ : int = self.unet.config.sample_size
lowercase_ : Optional[Any] = (batch_size, 3, img_size, img_size)
lowercase_ : Union[str, Any] = self.unet
# sample x_0 ~ N(0, sigma_0^2 * I)
lowercase_ : Dict = randn_tensor(_lowercase , generator=_lowercase , device=self.device ) * self.scheduler.init_noise_sigma
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
# here sigma_t == t_i from the paper
lowercase_ : Any = self.scheduler.schedule[t]
lowercase_ : Any = self.scheduler.schedule[t - 1] if t > 0 else 0
# 1. Select temporarily increased noise level sigma_hat
# 2. Add new noise to move from sample_i to sample_hat
lowercase_ : Union[str, Any] = self.scheduler.add_noise_to_input(_lowercase , _lowercase , generator=_lowercase )
# 3. Predict the noise residual given the noise magnitude `sigma_hat`
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase_ : Optional[int] = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
# 4. Evaluate dx/dt at sigma_hat
# 5. Take Euler step from sigma to sigma_prev
lowercase_ : Optional[Any] = self.scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase )
if sigma_prev != 0:
# 6. Apply 2nd order correction
# The model inputs and output are adjusted by following eq. (213) in [1].
lowercase_ : Dict = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
lowercase_ : Tuple = self.scheduler.step_correct(
_lowercase , _lowercase , _lowercase , _lowercase , step_output.prev_sample , step_output["""derivative"""] , )
lowercase_ : Dict = step_output.prev_sample
lowercase_ : Tuple = (sample / 2 + 0.5).clamp(0 , 1 )
lowercase_ : Tuple = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase_ : Optional[int] = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
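# Hedged usage sketch (comment only; the class name is the one this pipeline is exposed under in
# diffusers, treated here as an assumption since the class above is obfuscated): sampling only
# needs a trained UNet and the Karras VE scheduler imported at the top of this file.
#
#   pipe = KarrasVePipeline(unet=unet, scheduler=KarrasVeScheduler())
#   image = pipe(batch_size=1, num_inference_steps=50, output_type="pil").images[0]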
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class __magic_name__ :
def __init__( self : str , lowercase_ : int , lowercase_ : Union[str, Any]=sys.maxsize ):
lowercase_ : Optional[int] = '''bilinear'''
lowercase_ : Optional[Any] = max_size
lowercase_ : List[Any] = short_edge_length
def __call__( self : int , lowercase_ : Dict ):
lowercase_ : Any = []
for img in imgs:
lowercase_ : Optional[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
lowercase_ : str = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
lowercase_ : int = size * 1.0 / min(lowercase_ , lowercase_ )
if h < w:
lowercase_ : Dict = size, scale * w
else:
lowercase_ : str = scale * h, size
if max(lowercase_ , lowercase_ ) > self.max_size:
lowercase_ : Any = self.max_size * 1.0 / max(lowercase_ , lowercase_ )
lowercase_ : Tuple = newh * scale
lowercase_ : Any = neww * scale
lowercase_ : List[str] = int(neww + 0.5 )
lowercase_ : List[str] = int(newh + 0.5 )
if img.dtype == np.uinta:
lowercase_ : Optional[int] = Image.fromarray(lowercase_ )
lowercase_ : List[str] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
lowercase_ : Optional[Any] = np.asarray(lowercase_ )
else:
lowercase_ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
lowercase_ : Union[str, Any] = nn.functional.interpolate(
lowercase_ , (newh, neww) , mode=self.interp_method , align_corners=lowercase_ ).squeeze(0 )
img_augs.append(lowercase_ )
return img_augs
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Dict ):
lowercase_ : Optional[Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
lowercase_ : Any = cfg.INPUT.FORMAT
lowercase_ : List[Any] = cfg.SIZE_DIVISIBILITY
lowercase_ : List[str] = cfg.PAD_VALUE
lowercase_ : Dict = cfg.INPUT.MAX_SIZE_TEST
lowercase_ : Optional[Any] = cfg.MODEL.DEVICE
lowercase_ : int = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ : Tuple = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
lowercase_ : int = lambda lowercase_ : (x - self.pixel_mean) / self.pixel_std
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[Any] ):
lowercase_ : Optional[Any] = tuple(max(lowercase_ ) for s in zip(*[img.shape for img in images] ) )
lowercase_ : Union[str, Any] = [im.shape[-2:] for im in images]
lowercase_ : Tuple = [
nn.functional.pad(
lowercase_ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowercase_ , lowercase_ )
]
return torch.stack(lowercase_ ), torch.tensor(lowercase_ )
def __call__( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any]=False ):
with torch.no_grad():
if not isinstance(lowercase_ , lowercase_ ):
lowercase_ : Any = [images]
if single_image:
assert len(lowercase_ ) == 1
for i in range(len(lowercase_ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowercase_ , images.pop(lowercase_ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowercase_ , torch.as_tensor(img_tensorize(images.pop(lowercase_ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
lowercase_ : Union[str, Any] = torch.tensor([im.shape[:2] for im in images] )
lowercase_ : Dict = self.aug(lowercase_ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
lowercase_ : List[str] = [self.normalizer(lowercase_ ) for x in images]
# now pad them to do the following operations
lowercase_ : Dict = self.pad(lowercase_ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
lowercase_ : Union[str, Any] = torch.true_divide(lowercase_ , lowercase_ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Union[str, Any] ) -> Optional[int]:
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple[int, int] ) -> Union[str, Any]:
assert torch.isfinite(__A ).all(), "Box tensor contains infinite or NaN!"
lowercase_ : Optional[Any] = box_size
tensor[:, 0].clamp_(min=0 , max=__A )
tensor[:, 1].clamp_(min=0 , max=__A )
tensor[:, 2].clamp_(min=0 , max=__A )
tensor[:, 3].clamp_(min=0 , max=__A )
| 708
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
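# Quick illustration of the rule exercised above (values arbitrary): a dataset counts as
# "small" only when its size is known and strictly below a non-zero IN_MEMORY_MAX_SIZE.
#
#   with datasets.config.IN_MEMORY_MAX_SIZE == 500 * 2**20:
#       is_small_dataset(400 * 2**20)  # -> True
#       is_small_dataset(600 * 2**20)  # -> False
#       is_small_dataset(None)         # -> False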
| 30
| 0
|
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 709
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 0
|
'''simple docstring'''
_lowercase : Dict = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 710
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> np.array:
lowercase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ : List[Any] = np.zeros((n + 1,) )
lowercase_ : List[Any] = ya
lowercase_ : List[str] = xa
for k in range(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = y[k] + step_size * ode_func(UpperCAmelCase__ , y[k] )
lowercase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__ , y[k] ) + ode_func(x + step_size , UpperCAmelCase__ ))
)
x += step_size
return y
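# Hedged usage sketch of the Heun (explicit trapezoidal) integrator defined above, written as a
# self-contained copy because the module-level name is obfuscated. Integrating y' = y on [0, 1]
# with y(0) = 1 should land close to e ~= 2.71828.
def _heun_demo(step_size: float = 0.01) -> float:
    def ode_func(x: float, y: float) -> float:
        return y
    n = int(np.ceil(1.0 / step_size))
    y = np.zeros(n + 1)
    y[0], x = 1.0, 0.0
    for k in range(n):
        predictor = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, predictor))
        x += step_size
    return float(y[-1])  # ~ 2.7182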
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = ['''image_processor''']
UpperCamelCase__ = '''SamImageProcessor'''
def __init__( self : Any , lowercase_ : Optional[int] ):
super().__init__(__A )
lowercase_ : Union[str, Any] = self.image_processor
lowercase_ : Optional[int] = -10
lowercase_ : List[str] = self.image_processor.size["longest_edge"]
def __call__( self : Any , lowercase_ : Tuple=None , lowercase_ : Any=None , lowercase_ : Optional[Any]=None , lowercase_ : Dict=None , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : str , ):
lowercase_ : str = self.image_processor(
__A , return_tensors=__A , **__A , )
# pop arguments that are not used in the foward but used nevertheless
lowercase_ : List[Any] = encoding_image_processor["original_sizes"]
if hasattr(__A , """numpy""" ): # Checks if Torch or TF tensor
lowercase_ : Dict = original_sizes.numpy()
lowercase_ : Union[str, Any] = self._check_and_preprocess_points(
input_points=__A , input_labels=__A , input_boxes=__A , )
lowercase_ : Union[str, Any] = self._normalize_and_convert(
__A , __A , input_points=__A , input_labels=__A , input_boxes=__A , return_tensors=__A , )
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=None , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : int="pt" , ):
if input_points is not None:
if len(__A ) != len(__A ):
lowercase_ : Tuple = [
self._normalize_coordinates(self.target_size , __A , original_sizes[0] ) for point in input_points
]
else:
lowercase_ : Dict = [
self._normalize_coordinates(self.target_size , __A , __A )
for point, original_size in zip(__A , __A )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
lowercase_ : Dict = self._pad_points_and_labels(__A , __A )
lowercase_ : Optional[int] = np.array(__A )
if input_labels is not None:
lowercase_ : List[Any] = np.array(__A )
if input_boxes is not None:
if len(__A ) != len(__A ):
lowercase_ : Optional[Any] = [
self._normalize_coordinates(self.target_size , __A , original_sizes[0] , is_bounding_box=__A )
for box in input_boxes
]
else:
lowercase_ : List[str] = [
self._normalize_coordinates(self.target_size , __A , __A , is_bounding_box=__A )
for box, original_size in zip(__A , __A )
]
lowercase_ : Dict = np.array(__A )
if input_boxes is not None:
if return_tensors == "pt":
lowercase_ : str = torch.from_numpy(__A )
# boxes batch size of 1 by default
lowercase_ : int = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
lowercase_ : str = tf.convert_to_tensor(__A )
# boxes batch size of 1 by default
lowercase_ : Optional[Any] = tf.expand_dims(__A , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({"""input_boxes""": input_boxes} )
if input_points is not None:
if return_tensors == "pt":
lowercase_ : Dict = torch.from_numpy(__A )
# point batch size of 1 by default
lowercase_ : int = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
lowercase_ : Any = tf.convert_to_tensor(__A )
# point batch size of 1 by default
lowercase_ : Any = tf.expand_dims(__A , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({"""input_points""": input_points} )
if input_labels is not None:
if return_tensors == "pt":
lowercase_ : Optional[int] = torch.from_numpy(__A )
# point batch size of 1 by default
lowercase_ : List[str] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
lowercase_ : Dict = tf.convert_to_tensor(__A )
# point batch size of 1 by default
lowercase_ : Tuple = tf.expand_dims(__A , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({"""input_labels""": input_labels} )
return encoding_image_processor
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Dict , lowercase_ : str ):
lowercase_ : Optional[Any] = max([point.shape[0] for point in input_points] )
lowercase_ : Dict = []
for i, point in enumerate(__A ):
if point.shape[0] != expected_nb_points:
lowercase_ : Tuple = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
lowercase_ : Dict = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(__A )
lowercase_ : Tuple = processed_input_points
return input_points, input_labels
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : int , lowercase_ : np.ndarray , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=False ):
lowercase_ : List[str] = original_size
lowercase_ : Union[str, Any] = self.image_processor._get_preprocess_shape(__A , longest_edge=__A )
lowercase_ : str = deepcopy(__A ).astype(__A )
if is_bounding_box:
lowercase_ : Dict = coords.reshape(-1 , 2 , 2 )
lowercase_ : int = coords[..., 0] * (new_w / old_w)
lowercase_ : Dict = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
lowercase_ : List[Any] = coords.reshape(-1 , 4 )
return coords
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : str=None , lowercase_ : Tuple=None , lowercase_ : List[Any]=None , ):
if input_points is not None:
if hasattr(__A , """numpy""" ): # Checks for TF or Torch tensor
lowercase_ : Optional[int] = input_points.numpy().tolist()
if not isinstance(__A , __A ) or not isinstance(input_points[0] , __A ):
raise ValueError("""Input points must be a list of list of floating points.""" )
lowercase_ : str = [np.array(__A ) for input_point in input_points]
else:
lowercase_ : List[str] = None
if input_labels is not None:
if hasattr(__A , """numpy""" ):
lowercase_ : Any = input_labels.numpy().tolist()
if not isinstance(__A , __A ) or not isinstance(input_labels[0] , __A ):
raise ValueError("""Input labels must be a list of list integers.""" )
lowercase_ : str = [np.array(__A ) for label in input_labels]
else:
lowercase_ : int = None
if input_boxes is not None:
if hasattr(__A , """numpy""" ):
lowercase_ : str = input_boxes.numpy().tolist()
if (
not isinstance(__A , __A )
or not isinstance(input_boxes[0] , __A )
or not isinstance(input_boxes[0][0] , __A )
):
raise ValueError("""Input boxes must be a list of list of list of floating points.""" )
lowercase_ : str = [np.array(__A ).astype(np.floataa ) for box in input_boxes]
else:
lowercase_ : str = None
return input_points, input_labels, input_boxes
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[Any] = self.image_processor.model_input_names
return list(dict.fromkeys(__A ) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , *lowercase_ : int , **lowercase_ : Union[str, Any] ):
return self.image_processor.post_process_masks(*__A , **__A )
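# Hedged usage sketch (comment only; model id and keyword names follow the public SamProcessor
# API, which the obfuscated class above appears to mirror — treat them as assumptions):
#
#   from transformers import SamModel, SamProcessor
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   model = SamModel.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(image, input_points=[[[450, 600]]], return_tensors="pt")
#   outputs = model(**inputs)
#   masks = processor.post_process_masks(
#       outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
#   )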
| 711
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
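# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The factory
# functions above implement the standard Audio EQ Cookbook biquads (low-pass,
# high-pass, band-pass, all-pass, peak, low-shelf, high-shelf). The sketch
# below builds a low-pass biquad by hand with the same formulas and assumes
# that `IIRFilter.process` filters one sample at a time, as in the upstream
# `audio_filters` package.
def _demo_lowpass_biquad() -> list[float]:
    samplerate, frequency, q_factor = 48_000, 1_000, 1 / sqrt(2)
    w0 = tau * frequency / samplerate
    alpha = sin(w0) / (2 * q_factor)
    # Audio EQ Cookbook low-pass coefficients
    b0 = b2 = (1 - cos(w0)) / 2
    b1 = 1 - cos(w0)
    a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
    lowpass = IIRFilter(2)
    lowpass.set_coefficients([a0, a1, a2], [b0, b1, b2])
    # Feed a short burst through the filter and return the smoothed output.
    return [lowpass.process(sample) for sample in (0.0, 1.0, 0.5, -0.25)]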
| 30
| 0
|
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_lowercase : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(UpperCamelCase_)
class __magic_name__ ( UpperCamelCase_):
def __init__( self : str , **lowercase_ : Any ):
super().__init__(**_a )
if self.framework == "tf":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
requires_backends(self , """vision""" )
self.check_model_type(_a )
def __call__( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] = None , **lowercase_ : Any , ):
if "text_queries" in kwargs:
lowercase_ : Any = kwargs.pop("""text_queries""" )
if isinstance(_a , (str, Image.Image) ):
lowercase_ : Optional[int] = {"""image""": image, """candidate_labels""": candidate_labels}
else:
lowercase_ : int = image
lowercase_ : List[str] = super().__call__(_a , **_a )
return results
def SCREAMING_SNAKE_CASE_ ( self : List[str] , **lowercase_ : Optional[int] ):
lowercase_ : str = {}
if "threshold" in kwargs:
lowercase_ : Optional[int] = kwargs["""threshold"""]
if "top_k" in kwargs:
lowercase_ : Dict = kwargs["""top_k"""]
return {}, {}, postprocess_params
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : int ):
lowercase_ : Any = load_image(inputs["""image"""] )
lowercase_ : List[str] = inputs["""candidate_labels"""]
if isinstance(_a , _a ):
lowercase_ : Union[str, Any] = candidate_labels.split(""",""" )
lowercase_ : str = torch.tensor([[image.height, image.width]] , dtype=torch.intaa )
for i, candidate_label in enumerate(_a ):
lowercase_ : Dict = self.tokenizer(_a , return_tensors=self.framework )
lowercase_ : Dict = self.image_processor(_a , return_tensors=self.framework )
yield {
"is_last": i == len(_a ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tuple ):
lowercase_ : Optional[int] = model_inputs.pop("""target_size""" )
lowercase_ : str = model_inputs.pop("""candidate_label""" )
lowercase_ : Dict = model_inputs.pop("""is_last""" )
lowercase_ : Tuple = self.model(**_a )
lowercase_ : int = {"""target_size""": target_size, """candidate_label""": candidate_label, """is_last""": is_last, **outputs}
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Union[str, Any] , lowercase_ : Any=0.1 , lowercase_ : int=None ):
lowercase_ : List[Any] = []
for model_output in model_outputs:
lowercase_ : List[str] = model_output["""candidate_label"""]
lowercase_ : List[Any] = BaseModelOutput(_a )
lowercase_ : List[str] = self.image_processor.post_process_object_detection(
outputs=_a , threshold=_a , target_sizes=model_output["""target_size"""] )[0]
for index in outputs["scores"].nonzero():
lowercase_ : List[str] = outputs["""scores"""][index].item()
lowercase_ : Optional[Any] = self._get_bounding_box(outputs["""boxes"""][index][0] )
lowercase_ : Dict = {"""score""": score, """label""": label, """box""": box}
results.append(_a )
lowercase_ : List[str] = sorted(_a , key=lambda lowercase_ : x["score"] , reverse=_a )
if top_k:
lowercase_ : List[str] = results[:top_k]
return results
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] ):
if self.framework != "pt":
raise ValueError("""The ZeroShotObjectDetectionPipeline is only available in PyTorch.""" )
lowercase_ : Dict = box.int().tolist()
lowercase_ : List[Any] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
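# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). The pipeline is
# normally constructed through the task string below; the OWL-ViT checkpoint
# and the COCO image URL are assumptions, and any zero-shot object-detection
# model should work.
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
    predictions = detector(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["cat", "remote control"],
        threshold=0.1,
    )
    # Each entry looks like {"score": ..., "label": ..., "box": {"xmin": ..., ...}}
    print(predictions)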
| 712
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
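# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file). This builder
# backs `datasets.Dataset.from_spark`, which is the public entry point; the
# toy DataFrame below is an assumption and a local Spark session is required.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    from datasets import Dataset

    spark = SparkSession.builder.master("local[*]").getOrCreate()
    df = spark.createDataFrame([("hello", 0), ("world", 1)], ["text", "label"])
    ds = Dataset.from_spark(df)
    print(ds)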
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class __magic_name__ :
UpperCamelCase__ = XGLMConfig
UpperCamelCase__ = {}
UpperCamelCase__ = 'gelu'
def __init__( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : List[str]=14 , lowercase_ : int=7 , lowercase_ : Tuple=True , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Tuple=99 , lowercase_ : List[Any]=32 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=4 , lowercase_ : List[str]=37 , lowercase_ : List[str]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=512 , lowercase_ : int=0.02 , ):
lowercase_ : Dict = parent
lowercase_ : Dict = batch_size
lowercase_ : List[str] = seq_length
lowercase_ : List[Any] = is_training
lowercase_ : Optional[Any] = use_input_mask
lowercase_ : List[str] = use_labels
lowercase_ : str = vocab_size
lowercase_ : int = d_model
lowercase_ : int = num_hidden_layers
lowercase_ : List[str] = num_attention_heads
lowercase_ : Union[str, Any] = ffn_dim
lowercase_ : List[str] = activation_function
lowercase_ : List[str] = activation_dropout
lowercase_ : Tuple = attention_dropout
lowercase_ : Any = max_position_embeddings
lowercase_ : Dict = initializer_range
lowercase_ : Tuple = None
lowercase_ : str = 0
lowercase_ : List[Any] = 2
lowercase_ : Dict = 1
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowercase_ : str = None
if self.use_input_mask:
lowercase_ : Any = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : str = self.get_config()
lowercase_ : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=lowercase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Any = self.prepare_config_and_inputs()
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : str = config_and_inputs
lowercase_ : List[str] = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class __magic_name__ ( _snake_case, _snake_case, unittest.TestCase):
UpperCamelCase__ = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase__ = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase__ = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = TFXGLMModelTester(self )
lowercase_ : Any = ConfigTester(self , config_class=lowercase_ , n_embd=37 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : str = TFXGLMModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
super().test_resize_token_embeddings()
@require_tf
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : str=True ):
lowercase_ : Optional[Any] = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowercase_ : Dict = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowercase_ : int = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowercase_ : int = model.generate(lowercase_ , do_sample=lowercase_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[Any] = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase_ : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
lowercase_ : Union[str, Any] = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
lowercase_ : str = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
lowercase_ : Tuple = model.generate(lowercase_ , do_sample=lowercase_ , seed=[7, 0] )
lowercase_ : Tuple = tokenizer.decode(output_ids[0] , skip_special_tokens=lowercase_ )
lowercase_ : Tuple = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(lowercase_ , lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Tuple = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowercase_ : Any = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowercase_ : Tuple = """left"""
# use different length sentences to test batching
lowercase_ : Tuple = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
lowercase_ : List[Any] = tokenizer(lowercase_ , return_tensors="""tf""" , padding=lowercase_ )
lowercase_ : int = inputs["""input_ids"""]
lowercase_ : Optional[int] = model.generate(input_ids=lowercase_ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
lowercase_ : Dict = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowercase_ : Union[str, Any] = model.generate(input_ids=lowercase_ , max_new_tokens=12 )
lowercase_ : List[Any] = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowercase_ : Tuple = model.generate(input_ids=lowercase_ , max_new_tokens=12 )
lowercase_ : int = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ )
lowercase_ : str = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ )
lowercase_ : str = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ )
lowercase_ : Optional[Any] = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(lowercase_ , lowercase_ )
self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] )
| 713
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
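# ---------------------------------------------------------------------------
# Illustrative note (not part of the original file): because of the
# _LazyModule above, importing the package is cheap and the torch-backed
# classes are only materialised when an attribute is first accessed, e.g.
#
#     from transformers.models import bloom
#     config_cls = bloom.BloomConfig        # resolved lazily from the config module
#     model_cls = bloom.BloomForCausalLM    # resolved lazily; needs torch installed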
| 30
| 0
|
'''simple docstring'''
def print_max_activities(start: list, finish: list) -> None:
    n = len(finish)
    print("""The following activities are selected:""")
    # The first activity is always selected
    i = 0
    print(i, end=""",""")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=""",""")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 714
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 30
| 0
|
'''simple docstring'''
import numpy as np
import datasets
_lowercase : Optional[int] = "\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n"
_lowercase : int = "\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n"
_lowercase : Tuple = "\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {'mahalanobis': array([0.5])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : List[Any] ):
lowercase_ : int = np.array(_UpperCamelCase )
lowercase_ : Tuple = np.array(_UpperCamelCase )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
lowercase_ : int = X - np.mean(_UpperCamelCase )
lowercase_ : List[str] = np.cov(reference_distribution.T )
try:
lowercase_ : Tuple = np.linalg.inv(_UpperCamelCase )
except np.linalg.LinAlgError:
lowercase_ : Tuple = np.linalg.pinv(_UpperCamelCase )
lowercase_ : Optional[Any] = np.dot(_UpperCamelCase , _UpperCamelCase )
lowercase_ : Union[str, Any] = np.dot(_UpperCamelCase , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
| 715
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : Any = logging.get_logger(__name__)
_lowercase : int = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __magic_name__ ( a__):
UpperCamelCase__ = '''mctct'''
def __init__( self : Tuple , lowercase_ : Any=8065 , lowercase_ : int=1536 , lowercase_ : Tuple=36 , lowercase_ : int=6144 , lowercase_ : Optional[int]=4 , lowercase_ : List[Any]=384 , lowercase_ : Optional[Any]=920 , lowercase_ : int=1E-5 , lowercase_ : Tuple=0.3 , lowercase_ : List[str]="relu" , lowercase_ : Any=0.02 , lowercase_ : Optional[Any]=0.3 , lowercase_ : int=0.3 , lowercase_ : int=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : Dict=1 , lowercase_ : List[Any]=0.3 , lowercase_ : List[Any]=1 , lowercase_ : Optional[Any]=(7,) , lowercase_ : List[str]=(3,) , lowercase_ : List[str]=80 , lowercase_ : List[Any]=1 , lowercase_ : Union[str, Any]=None , lowercase_ : Union[str, Any]="sum" , lowercase_ : Tuple=False , **lowercase_ : Optional[Any] , ):
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
lowercase_ : List[Any] = vocab_size
lowercase_ : Optional[Any] = hidden_size
lowercase_ : List[str] = num_hidden_layers
lowercase_ : Union[str, Any] = intermediate_size
lowercase_ : List[str] = num_attention_heads
lowercase_ : Optional[Any] = attention_head_dim
lowercase_ : Optional[int] = max_position_embeddings
lowercase_ : List[str] = layer_norm_eps
lowercase_ : Dict = layerdrop
lowercase_ : Optional[Any] = hidden_act
lowercase_ : Tuple = initializer_range
lowercase_ : Optional[int] = hidden_dropout_prob
lowercase_ : Dict = attention_probs_dropout_prob
lowercase_ : Union[str, Any] = pad_token_id
lowercase_ : Any = bos_token_id
lowercase_ : Union[str, Any] = eos_token_id
lowercase_ : str = conv_glu_dim
lowercase_ : int = conv_dropout
lowercase_ : str = num_conv_layers
lowercase_ : Union[str, Any] = input_feat_per_channel
lowercase_ : Any = input_channels
lowercase_ : Optional[int] = conv_channels
lowercase_ : str = ctc_loss_reduction
lowercase_ : str = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowercase_ : Any = list(_A )
lowercase_ : List[Any] = list(_A )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
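# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the check above rejects
# configurations whose `conv_kernel` length does not match `num_conv_layers`.
# It assumes the upstream class name `MCTCTConfig` exported by transformers.
if __name__ == "__main__":
    from transformers import MCTCTConfig

    MCTCTConfig(num_conv_layers=1, conv_kernel=(7,))  # consistent -> fine
    try:
        MCTCTConfig(num_conv_layers=2, conv_kernel=(7,))  # mismatch
    except ValueError as err:
        print(err)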
| 716
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
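# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file), assuming the
# upstream names `ImageClassification` and `align_with_features` from
# `datasets.tasks`: aligning copies the dataset's real ClassLabel into the
# template's label schema.
if __name__ == "__main__":
    from datasets import ClassLabel, Features, Image
    from datasets.tasks import ImageClassification

    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification(image_column="image", label_column="labels")
    print(task.align_with_features(features).label_schema)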
| 30
| 0
|
'''simple docstring'''
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def test_filelock(tmpdir):
    lock1 = FileLock(str(tmpdir / """foo.lock"""))
    lock2 = FileLock(str(tmpdir / """foo.lock"""))
    timeout = 0.01
    with lock1.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock2.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock1 = FileLock(str(tmpdir / filename))
    assert lock1._lock_file.endswith(""".lock""")
    assert not lock1._lock_file.endswith(filename)
    assert len(os.path.basename(lock1._lock_file)) <= 255
    lock2 = FileLock(tmpdir / filename)
    with lock1.acquire():
        with pytest.raises(Timeout):
            lock2.acquire(0)
| 717
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
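# ---------------------------------------------------------------------------
# Illustrative invocation (not part of the original script); the file name
# `retrieve.py` is an assumption:
#
#     python retrieve.py \
#         --class_prompt "a photo of a dog" \
#         --class_data_dir ./real_reg/dog \
#         --num_class_images 200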
| 30
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Optional[Any] = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
_lowercase : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
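# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file), assuming the
# recursive name `slowsort` used above: the sort works in place on the whole
# list when no bounds are given.
def _demo_slowsort() -> list:
    data = [5, 2, 9, 1]
    slowsort(data)
    return data  # [1, 2, 5, 9]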
| 30
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Any = '''▁'''
_lowercase : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_lowercase : Tuple = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
_lowercase : Optional[int] = {
'''facebook/xglm-564M''': 2048,
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ['input_ids', 'attention_mask']
def __init__( self : Dict , lowercase_ : List[str] , lowercase_ : Any="<s>" , lowercase_ : Union[str, Any]="</s>" , lowercase_ : Optional[Any]="</s>" , lowercase_ : List[str]="<s>" , lowercase_ : Optional[int]="<unk>" , lowercase_ : Union[str, Any]="<pad>" , lowercase_ : Optional[Dict[str, Any]] = None , **lowercase_ : List[Any] , ):
lowercase_ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowercase_ : int = 7
lowercase_ : List[Any] = [f'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowercase_ : Union[str, Any] = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
lowercase_ : Union[str, Any] = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase_ : Union[str, Any] = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase_ : Dict = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
lowercase_ : Optional[Any] = len(self.sp_model )
lowercase_ : List[Any] = {f'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
lowercase_ : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ):
lowercase_ : int = self.__dict__.copy()
lowercase_ : int = None
lowercase_ : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase_ : Union[str, Any] = {}
lowercase_ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowercase_ : List[str] = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
lowercase_ : int = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[int] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str ):
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Dict ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase_ : Any = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] ):
lowercase_ : str = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not os.path.isdir(__UpperCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Any = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
lowercase_ : List[str] = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
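# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file), going through the
# public transformers API rather than this module directly; the checkpoint
# name is the one referenced in PRETRAINED_VOCAB_FILES_MAP above.
if __name__ == "__main__":
    from transformers import XGLMTokenizer

    tok = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    enc = tok("Hello world")
    print(enc["input_ids"])            # sentencepiece ids shifted by the fairseq offset
    print(tok.decode(enc["input_ids"]))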
| 719
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
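# ---------------------------------------------------------------------------
# Illustrative invocation (not part of the original script); the file name
# `inference_bf16.py` is an assumption. `--dpm` swaps in the
# DPMSolverMultistepScheduler and `--steps` sets the number of denoising steps:
#
#     python inference_bf16.py --dpm --steps 20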
| 30
| 0
|
'''simple docstring'''
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import BatchEncoding, MarianTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import is_sentencepiece_available, is_tf_available, is_torch_available
if is_sentencepiece_available():
from transformers.models.marian.tokenization_marian import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase : Dict = get_tests_dir("fixtures/test_sentencepiece.model")
_lowercase : Union[str, Any] = {"""target_lang""": """fi""", """source_lang""": """en"""}
_lowercase : int = """>>zh<<"""
_lowercase : List[str] = """Helsinki-NLP/"""
if is_torch_available():
_lowercase : Union[str, Any] = """pt"""
elif is_tf_available():
_lowercase : Dict = """tf"""
else:
_lowercase : Optional[Any] = """jax"""
@require_sentencepiece
class __magic_name__ ( _a, unittest.TestCase):
UpperCamelCase__ = MarianTokenizer
UpperCamelCase__ = False
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int ):
super().setUp()
lowercase_ : List[Any] = ["""</s>""", """<unk>""", """▁This""", """▁is""", """▁a""", """▁t""", """est""", """\u0120""", """<pad>"""]
lowercase_ : Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
lowercase_ : List[str] = Path(self.tmpdirname )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["""vocab"""] )
save_json(snake_case_ , save_dir / VOCAB_FILES_NAMES["""tokenizer_config_file"""] )
if not (save_dir / VOCAB_FILES_NAMES["source_spm"]).exists():
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["""source_spm"""] )
copyfile(snake_case_ , save_dir / VOCAB_FILES_NAMES["""target_spm"""] )
lowercase_ : Union[str, Any] = MarianTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , **lowercase_ : Optional[Any] ):
return MarianTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Tuple ):
return (
"This is a test",
"This is a test",
)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = """</s>"""
lowercase_ : List[str] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case_ ) , snake_case_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case_ ) , snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """</s>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """<pad>""" )
self.assertEqual(len(snake_case_ ) , 9 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
self.assertEqual(self.get_tokenizer().vocab_size , 9 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[Any] = MarianTokenizer.from_pretrained(f'''{ORG_NAME}opus-mt-en-de''' )
lowercase_ : Optional[Any] = en_de_tokenizer(["""I am a small frog"""] , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
lowercase_ : int = [38, 121, 14, 697, 38848, 0]
self.assertListEqual(snake_case_ , batch.input_ids[0] )
lowercase_ : List[str] = tempfile.mkdtemp()
en_de_tokenizer.save_pretrained(snake_case_ )
lowercase_ : Optional[int] = [x.name for x in Path(snake_case_ ).glob("""*""" )]
self.assertIn("""source.spm""" , snake_case_ )
MarianTokenizer.from_pretrained(snake_case_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : str = self.get_tokenizer()
lowercase_ : Optional[int] = tok(
["""I am a small frog""" * 1000, """I am a small frog"""] , padding=snake_case_ , truncation=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch.input_ids.shape , (2, 512) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.get_tokenizer()
lowercase_ : Optional[int] = tok(["""I am a tiny frog""", """I am a small frog"""] , padding=snake_case_ , return_tensors=snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertEqual(batch_smaller.input_ids.shape , (2, 10) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
# fmt: off
lowercase_ : Union[str, Any] = {"""input_ids""": [[43495, 462, 20, 42164, 1369, 52, 464, 132, 1703, 492, 13, 7491, 38999, 6, 8, 464, 132, 1703, 492, 13, 4669, 37867, 13, 7525, 27, 1593, 988, 13, 33972, 7029, 6, 20, 8251, 383, 2, 270, 5866, 3788, 2, 2353, 8251, 12338, 2, 13958, 387, 2, 3629, 6953, 188, 2900, 2, 13958, 8011, 11501, 23, 8460, 4073, 34009, 20, 435, 11439, 27, 8, 8460, 4073, 6004, 20, 9988, 375, 27, 33, 266, 1945, 1076, 1350, 37867, 3288, 5, 577, 1076, 4374, 8, 5082, 5, 26453, 257, 556, 403, 2, 242, 132, 383, 316, 492, 8, 10767, 6, 316, 304, 4239, 3, 0], [148, 15722, 19, 1839, 12, 1350, 13, 22327, 5082, 5418, 47567, 35938, 59, 318, 19552, 108, 2183, 54, 14976, 4835, 32, 547, 1114, 8, 315, 2417, 5, 92, 19088, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100], [36, 6395, 12570, 39147, 11597, 6, 266, 4, 45405, 7296, 3, 0, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100, 58100]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=snake_case_ , model_name="""Helsinki-NLP/opus-mt-en-de""" , revision="""1a8c2263da11e68e50938f97e10cd57820bd504c""" , decode_kwargs={"""use_source_tokenizer""": True} , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Union[str, Any] = MarianTokenizer.from_pretrained("""hf-internal-testing/test-marian-two-vocabs""" )
lowercase_ : Optional[int] = """Tämä on testi"""
lowercase_ : Dict = """This is a test"""
lowercase_ : int = [76, 7, 2047, 2]
lowercase_ : Optional[Any] = [69, 12, 11, 940, 2]
lowercase_ : str = tokenizer(snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
lowercase_ : Tuple = tokenizer(text_target=snake_case_ ).input_ids
self.assertListEqual(snake_case_ , snake_case_ )
lowercase_ : str = tokenizer.decode(snake_case_ , skip_special_tokens=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
| 720
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = """https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"""
lowercase_ : Optional[Any] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert("""RGB""" )
return image
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> int:
lowercase_ : Tuple = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] ) -> Dict:
lowercase_ : Union[str, Any] = dct.pop(__UpperCamelCase )
lowercase_ : Optional[int] = val
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> List[Any]:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
lowercase_ : Union[str, Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
lowercase_ : Optional[Any] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
lowercase_ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(__UpperCamelCase , requires_grad=__UpperCamelCase ), v_bias) )
lowercase_ : Optional[Any] = qkv_bias
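        # The zero block in the middle of the fused bias stands in for the k bias, which the
        # original visual encoder does not learn (only q and v biases are popped above), so
        # the layout is [q_bias, 0, v_bias].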
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> List[Any]:
lowercase_ : Dict = 364 if """coco""" in model_name else 224
lowercase_ : str = InstructBlipVisionConfig(image_size=__UpperCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
lowercase_ : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
lowercase_ : int = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
lowercase_ : int = LlamaConfig.from_pretrained("""decapoda-research/llama-7b-hf""" , vocab_size=32001 ).to_dict()
elif "vicuna-13b" in model_name:
lowercase_ : Union[str, Any] = LlamaConfig.from_pretrained("""decapoda-research/llama-13b-hf""" , vocab_size=32001 ).to_dict()
else:
raise ValueError("""Model name not supported""" )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
lowercase_ : str = InstructBlipQFormerConfig(vocab_size=30523 ).to_dict()
lowercase_ : List[str] = InstructBlipConfig(vision_config=__UpperCamelCase , text_config=__UpperCamelCase , qformer_config=__UpperCamelCase )
return config, image_size
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Optional[Any]=False ) -> int:
lowercase_ : Optional[int] = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowercase_ : Tuple = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowercase_ : Any = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
lowercase_ : Union[str, Any] = get_blipa_config(__UpperCamelCase )
lowercase_ : Optional[Any] = InstructBlipForConditionalGeneration(__UpperCamelCase ).eval()
lowercase_ : Tuple = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowercase_ : int = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowercase_ : List[Any] = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowercase_ : Dict = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowercase_ : Union[str, Any] = load_model_and_preprocess(
name=__UpperCamelCase , model_type=__UpperCamelCase , is_eval=__UpperCamelCase , device=__UpperCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowercase_ : Any = original_model.state_dict()
lowercase_ : List[str] = create_rename_keys(__UpperCamelCase )
for src, dest in rename_keys:
rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowercase_ : int = state_dict.pop(__UpperCamelCase )
if key.startswith("""Qformer.bert""" ):
lowercase_ : Any = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowercase_ : Tuple = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowercase_ : Dict = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowercase_ : Dict = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowercase_ : Union[str, Any] = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowercase_ : List[str] = key.replace("""t5""" , """language""" )
lowercase_ : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(__UpperCamelCase , __UpperCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__UpperCamelCase , strict=__UpperCamelCase )
lowercase_ : Dict = load_demo_image()
lowercase_ : Optional[Any] = """What is unusual about this image?"""
# create processor
lowercase_ : int = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCamelCase , image_std=__UpperCamelCase )
lowercase_ : List[str] = InstructBlipProcessor(
image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase , qformer_tokenizer=__UpperCamelCase , )
lowercase_ : int = processor(images=__UpperCamelCase , text=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# make sure processor creates exact same pixel values
lowercase_ : Tuple = vis_processors["""eval"""](__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
lowercase_ : Dict = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , __UpperCamelCase )
original_model.to(__UpperCamelCase )
hf_model.to(__UpperCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
lowercase_ : Any = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
lowercase_ : Dict = hf_model(**__UpperCamelCase ).logits
else:
lowercase_ : Union[str, Any] = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
lowercase_ : List[Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(__UpperCamelCase )
lowercase_ : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowercase_ : int = hf_model(**__UpperCamelCase , labels=__UpperCamelCase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowercase_ : str = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , __UpperCamelCase , atol=__UpperCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowercase_ : int = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowercase_ : str = hf_model.generate(
**__UpperCamelCase , do_sample=__UpperCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowercase_ : Any = 2
print("""Original generation:""" , __UpperCamelCase )
lowercase_ : List[Any] = processor.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
lowercase_ : str = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if push_to_hub:
processor.push_to_hub(F'''Salesforce/{model_name}''' )
hf_model.push_to_hub(F'''Salesforce/{model_name}''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser()
_lowercase : Optional[Any] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
"--model_name",
default="instructblip-flan-t5-xl",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
_lowercase : str = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 721
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
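# Background for the tests below: for the block matrix M = [[A, B], [B.T, C]] with A
# invertible, det(M) = det(A) * det(S), where S = C - B.T @ A^-1 @ B is the Schur
# complement computed above. Minimal call sketch (values chosen only for illustration):
#     a = np.array([[1.0, 2.0], [2.0, 1.0]])
#     b = np.array([[0.0, 3.0], [3.0, 0.0]])
#     c = np.array([[2.0, 1.0], [6.0, 3.0]])
#     schur_complement(a, b, c)  # -> a 2x2 array, here C - B.T @ inv(A) @ B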
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 0
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> List[str]:
lowercase_ : Tuple = []
for part_id in partition_order:
lowercase_ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case__ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Tuple:
lowercase_ : str = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : str = spark.range(100 ).repartition(1 )
lowercase_ : Dict = Spark(snake_case__ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
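    # In other words: 100 rows * 8 bytes = 800 bytes in total, and 800 / 16 bytes per
    # shard = 50 partitions, which is the value asserted below.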
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Any:
lowercase_ : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : Any = spark.range(10 ).repartition(2 )
lowercase_ : List[str] = [1, 0]
lowercase_ : str = _generate_iterable_examples(snake_case__ , snake_case__ ) # Reverse the partitions.
lowercase_ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , snake_case__ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
lowercase_ , lowercase_ : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Dict:
lowercase_ : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : List[str] = spark.range(10 ).repartition(1 )
lowercase_ : Optional[int] = SparkExamplesIterable(snake_case__ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case__ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Dict:
lowercase_ : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : List[Any] = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""" ) as generator_mock:
lowercase_ : Optional[Any] = lambda UpperCAmelCase__ : x.reverse()
lowercase_ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [2, 1, 0] )
lowercase_ : int = SparkExamplesIterable(snake_case__ ).shuffle_data_sources(snake_case__ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case__ ):
lowercase_ , lowercase_ : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Any:
lowercase_ : int = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : int = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
lowercase_ : List[str] = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
lowercase_ , lowercase_ : List[str] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
lowercase_ : Optional[Any] = SparkExamplesIterable(snake_case__ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
lowercase_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case__ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case__ ):
lowercase_ , lowercase_ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Dict = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate()
lowercase_ : List[str] = spark.range(100 ).repartition(1 )
lowercase_ : int = Spark(snake_case__ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 700
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
        # Pad binary_stream with extra binary digits (zeros here) so that its length
        # becomes a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
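    # A valid base64 payload has a length that is a multiple of 4 and carries at most two
    # '=' characters: each '=' stands for one missing input byte, and a 3-byte block can be
    # short by at most two bytes.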
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
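# Round-trip sketch for the two helpers above (both carry the placeholder name
# lowerCamelCase in this snippet); the outputs follow the standard base64 alphabet and
# therefore match the builtin base64 module:
#     encode: b"Hello World!"      -> b"SGVsbG8gV29ybGQh"
#     decode: "SGVsbG8gV29ybGQh"   -> b"Hello World!"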
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowercase__, unittest.TestCase):
UpperCamelCase__ = LEDTokenizer
UpperCamelCase__ = LEDTokenizerFast
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
super().setUp()
lowercase_ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
lowercase_ : Any = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
lowercase_ : Optional[int] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
lowercase_ : Dict = {"""unk_token""": """<unk>"""}
lowercase_ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowercase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **lowercase_ : Optional[int] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : int , **lowercase_ : Tuple ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple ):
return "lower newer", "lower newer"
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Tuple = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
lowercase_ : Any = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="""pt""" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
lowercase_ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(lowercase_ , lowercase_ )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : str = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : Dict = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowercase_ )
self.assertIn("""attention_mask""" , lowercase_ )
self.assertNotIn("""labels""" , lowercase_ )
self.assertNotIn("""decoder_attention_mask""" , lowercase_ )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : str = tokenizer(text_target=lowercase_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : List[Any] = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=lowercase_ , truncation=lowercase_ , return_tensors="""pt""" )
self.assertIsInstance(lowercase_ , lowercase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : str = ["""A long paragraph for summarization."""]
lowercase_ : Optional[int] = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : int = tokenizer(lowercase_ , return_tensors="""pt""" )
lowercase_ : Tuple = tokenizer(text_target=lowercase_ , return_tensors="""pt""" )
lowercase_ : List[Any] = inputs["""input_ids"""]
lowercase_ : Optional[Any] = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
lowercase_ : str = ["""Summary of the text.""", """Another summary."""]
lowercase_ : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
lowercase_ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ )
lowercase_ : Optional[Any] = [[0] * len(lowercase_ ) for x in encoded_output["""input_ids"""]]
lowercase_ : List[Any] = tokenizer.pad(lowercase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
lowercase_ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase_ : Tuple = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = """A, <mask> AllenNLP sentence."""
lowercase_ : Dict = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
lowercase_ : Tuple = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
lowercase_ : List[str] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
lowercase_ : Tuple = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowercase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 701
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
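# The script assumes custom.js contains a block roughly shaped like the following
# (illustrative, not copied from the real file):
#     const stableVersion = "v4.29.0"
#     const versionMapping = {
#         "": "main",
#         "v4.29.0": "v4.29.0",
#     }
# update_custom_js rewrites the stableVersion line and appends a new "vX.Y.Z" entry
# just before the closing brace of versionMapping.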
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Optional[int] = {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class __magic_name__ ( _UpperCAmelCase):
'''simple docstring'''
UpperCamelCase__ = '''gpt_neox'''
def __init__( self : Optional[Any] , lowercase_ : Optional[Any]=50432 , lowercase_ : List[Any]=6144 , lowercase_ : List[str]=44 , lowercase_ : Union[str, Any]=64 , lowercase_ : Optional[Any]=24576 , lowercase_ : Dict="gelu" , lowercase_ : Tuple=0.25 , lowercase_ : str=10000 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Dict=0.0 , lowercase_ : Dict=0.1 , lowercase_ : str=2048 , lowercase_ : Tuple=0.02 , lowercase_ : Dict=1E-5 , lowercase_ : int=True , lowercase_ : int=0 , lowercase_ : List[Any]=2 , lowercase_ : Dict=False , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=None , **lowercase_ : Union[str, Any] , ):
super().__init__(bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase_ : Union[str, Any] = vocab_size
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Tuple = hidden_size
lowercase_ : str = num_hidden_layers
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : str = intermediate_size
lowercase_ : Dict = hidden_act
lowercase_ : str = rotary_pct
lowercase_ : str = rotary_emb_base
lowercase_ : List[Any] = attention_dropout
lowercase_ : Optional[int] = hidden_dropout
lowercase_ : Any = classifier_dropout
lowercase_ : Tuple = initializer_range
lowercase_ : Optional[Any] = layer_norm_eps
lowercase_ : Optional[int] = use_cache
lowercase_ : int = tie_word_embeddings
lowercase_ : Dict = use_parallel_residual
lowercase_ : Any = rope_scaling
self._rope_scaling_validation()
if self.hidden_size % self.num_attention_heads != 0:
raise ValueError(
"""The hidden size is not divisble by the number of attention heads! Make sure to update them!""" )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
raise ValueError(
"""`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, """
f'''got {self.rope_scaling}''' )
lowercase_ : str = self.rope_scaling.get("""type""" , lowercase_ )
lowercase_ : List[Any] = self.rope_scaling.get("""factor""" , lowercase_ )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
                f'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
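# Illustrative rope_scaling values that pass the validation above (example settings,
# not defaults of this config): {"type": "linear", "factor": 2.0} or
# {"type": "dynamic", "factor": 4.0}. A missing field, an unknown type name, or a
# factor <= 1.0 raises a ValueError.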
| 702
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 0
|
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Optional[int]:
lowercase_ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(snake_case_ , max_perimeter + 1 ):
lowercase_ : str = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(snake_case_ ):
lowercase_ : List[str] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
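# Hand-checked example of the counter above: with max_perimeter=12 the only right
# triangle that fits is (3, 4, 5), so the function returns Counter({12: 1}).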
def lowerCamelCase ( UpperCAmelCase__ : int = 1000 ) -> Union[str, Any]:
lowercase_ : List[str] = pythagorean_triple(snake_case_ )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 703
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Optional[int] = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class __magic_name__ ( lowercase__):
UpperCamelCase__ = '''ibert'''
def __init__( self : Optional[Any] , lowercase_ : Optional[int]=30522 , lowercase_ : Tuple=768 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : str=3072 , lowercase_ : str="gelu" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Any=0.1 , lowercase_ : Tuple=512 , lowercase_ : Dict=2 , lowercase_ : str=0.02 , lowercase_ : Union[str, Any]=1E-12 , lowercase_ : Tuple=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[Any]=2 , lowercase_ : List[Any]="absolute" , lowercase_ : str=False , lowercase_ : Any="none" , **lowercase_ : Any , ):
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
lowercase_ : int = vocab_size
lowercase_ : int = hidden_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : Any = num_attention_heads
lowercase_ : int = hidden_act
lowercase_ : List[Any] = intermediate_size
lowercase_ : str = hidden_dropout_prob
lowercase_ : Optional[int] = attention_probs_dropout_prob
lowercase_ : int = max_position_embeddings
lowercase_ : List[Any] = type_vocab_size
lowercase_ : List[str] = initializer_range
lowercase_ : List[Any] = layer_norm_eps
lowercase_ : List[Any] = position_embedding_type
lowercase_ : Union[str, Any] = quant_mode
lowercase_ : Union[str, Any] = force_dequant
class __magic_name__ ( lowercase__):
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
if self.task == "multiple-choice":
lowercase_ : str = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase_ : List[Any] = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
| 704
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
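# with the signature defaults above, conv_kernel_sizes=(5, 5) has length 2 and num_conv_layers is 2, so this check passes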
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 30
| 0
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
_lowercase : Any = random.Random()
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : List[str]=1.0 , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : List[str]=None ) -> Tuple:
if rng is None:
lowercase_ : Dict = global_rng
lowercase_ : List[str] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __magic_name__ ( unittest.TestCase):
def __init__( self : List[Any] , lowercase_ : Optional[Any] , lowercase_ : str=7 , lowercase_ : int=400 , lowercase_ : List[str]=2000 , lowercase_ : Any=2048 , lowercase_ : Any=128 , lowercase_ : int=1 , lowercase_ : Optional[int]=512 , lowercase_ : Union[str, Any]=30 , lowercase_ : Any=44100 , ):
lowercase_ : Optional[Any] = parent
lowercase_ : str = batch_size
lowercase_ : str = min_seq_length
lowercase_ : Union[str, Any] = max_seq_length
lowercase_ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
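# with the defaults above (min 400, max 2000, batch size 7) this step size is (2000 - 400) // 6 = 266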
lowercase_ : Union[str, Any] = spectrogram_length
lowercase_ : Optional[int] = feature_size
lowercase_ : Tuple = num_audio_channels
lowercase_ : List[str] = hop_length
lowercase_ : Union[str, Any] = chunk_length
lowercase_ : Tuple = sampling_rate
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any]=False , lowercase_ : List[str]=False ):
def _flatten(lowercase_ : int ):
return list(itertools.chain(*_a ) )
if equal_length:
lowercase_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase_ : str = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase_ : Tuple = [np.asarray(_a ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __magic_name__ ( __SCREAMING_SNAKE_CASE, unittest.TestCase):
UpperCamelCase__ = TvltFeatureExtractor
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Dict = TvltFeatureExtractionTester(self )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_a , """spectrogram_length""" ) )
self.assertTrue(hasattr(_a , """feature_size""" ) )
self.assertTrue(hasattr(_a , """num_audio_channels""" ) )
self.assertTrue(hasattr(_a , """hop_length""" ) )
self.assertTrue(hasattr(_a , """chunk_length""" ) )
self.assertTrue(hasattr(_a , """sampling_rate""" ) )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : List[Any] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
lowercase_ : Any = self.feature_extraction_class.from_pretrained(_a )
lowercase_ : Any = feat_extract_first.to_dict()
lowercase_ : List[str] = feat_extract_second.to_dict()
lowercase_ : Union[str, Any] = dict_first.pop("""mel_filters""" )
lowercase_ : str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase_ : Optional[Any] = os.path.join(_a , """feat_extract.json""" )
feat_extract_first.to_json_file(_a )
lowercase_ : Optional[Any] = self.feature_extraction_class.from_json_file(_a )
lowercase_ : Optional[int] = feat_extract_first.to_dict()
lowercase_ : int = feat_extract_second.to_dict()
lowercase_ : Optional[Any] = dict_first.pop("""mel_filters""" )
lowercase_ : List[str] = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(_a , _a ) )
self.assertEqual(_a , _a )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowercase_ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase_ : Optional[Any] = [np.asarray(_a ) for speech_input in speech_inputs]
# Test not batched input
lowercase_ : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowercase_ : Union[str, Any] = feature_extractor(_a , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowercase_ : str = feature_extractor(
_a , return_tensors="""np""" , sampling_rate=44100 , mask_audio=_a ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowercase_ : List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase_ : str = np.asarray(_a )
lowercase_ : Tuple = feature_extractor(_a , return_tensors="""np""" , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
lowercase_ : Any = ds.sort("""id""" ).select(range(_a ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : List[Any] = self._load_datasamples(1 )
lowercase_ : Dict = TvltFeatureExtractor()
lowercase_ : int = feature_extractor(_a , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
lowercase_ : Any = torch.tensor([[-0.30_32, -0.27_08], [-0.44_34, -0.40_07]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _a , atol=1E-4 ) )
| 705
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
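# with the defaults above (image_size=10, patch_size=2, num_frames=2): (10 // 2) ** 2 = 25 patches per frame, so seq_length = 2 * 25 + 1 = 51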
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
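# with the tester defaults this is [4, 26, 26]: 4 attention heads and 51 // 2 + 1 = 26 tokens (25 patches per frame plus the CLS token)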
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
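# hidden_states holds the embedding output plus one entry per transformer layer, hence num_hidden_layers + 1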
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 0
|
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Any , lowercase_ : Optional[Any]=13 , lowercase_ : List[str]=7 , lowercase_ : str=True , lowercase_ : Union[str, Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : str=99 , lowercase_ : List[Any]=32 , lowercase_ : Optional[Any]=5 , lowercase_ : int=4 , lowercase_ : Optional[int]=37 , lowercase_ : Dict="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : Dict=0.1 , lowercase_ : List[str]=128 , lowercase_ : str=32 , lowercase_ : Optional[int]=16 , lowercase_ : Optional[Any]=2 , lowercase_ : int=0.02 , lowercase_ : Tuple=3 , lowercase_ : Any=4 , lowercase_ : Dict=None , ):
lowercase_ : str = parent
lowercase_ : Dict = batch_size
lowercase_ : List[Any] = seq_length
lowercase_ : List[Any] = is_training
lowercase_ : Union[str, Any] = use_input_mask
lowercase_ : Optional[int] = use_token_type_ids
lowercase_ : int = use_labels
lowercase_ : Any = vocab_size
lowercase_ : Dict = hidden_size
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : List[str] = num_attention_heads
lowercase_ : List[Any] = intermediate_size
lowercase_ : Union[str, Any] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : Union[str, Any] = attention_probs_dropout_prob
lowercase_ : Tuple = max_position_embeddings
lowercase_ : Union[str, Any] = type_vocab_size
lowercase_ : Optional[int] = type_sequence_label_size
lowercase_ : Optional[Any] = initializer_range
lowercase_ : List[str] = num_labels
lowercase_ : List[Any] = num_choices
lowercase_ : Optional[int] = scope
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ : List[str] = None
if self.use_input_mask:
lowercase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ : List[str] = None
if self.use_token_type_ids:
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ : Optional[int] = None
lowercase_ : int = None
lowercase_ : Dict = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a_ , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Union[str, Any] = self.prepare_config_and_inputs()
lowercase_ : int = True
lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : Dict , lowercase_ : int , lowercase_ : Any , lowercase_ : str ):
lowercase_ : Dict = NezhaModel(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : Any = model(a_ , attention_mask=a_ , token_type_ids=a_ )
lowercase_ : Union[str, Any] = model(a_ , token_type_ids=a_ )
lowercase_ : List[str] = model(a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : Any , ):
lowercase_ : str = True
lowercase_ : Dict = NezhaModel(a_ )
model.to(a_ )
model.eval()
lowercase_ : Optional[Any] = model(
a_ , attention_mask=a_ , token_type_ids=a_ , encoder_hidden_states=a_ , encoder_attention_mask=a_ , )
lowercase_ : Dict = model(
a_ , attention_mask=a_ , token_type_ids=a_ , encoder_hidden_states=a_ , )
lowercase_ : Tuple = model(a_ , attention_mask=a_ , token_type_ids=a_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ):
lowercase_ : Optional[int] = NezhaForMaskedLM(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : Dict = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = NezhaForNextSentencePrediction(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : Optional[int] = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : Dict ):
lowercase_ : Tuple = NezhaForPreTraining(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : int = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , next_sentence_label=a_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Any , lowercase_ : Dict ):
lowercase_ : List[Any] = NezhaForQuestionAnswering(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : int = model(
a_ , attention_mask=a_ , token_type_ids=a_ , start_positions=a_ , end_positions=a_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict ):
lowercase_ : Any = self.num_labels
lowercase_ : Union[str, Any] = NezhaForSequenceClassification(a_ )
model.to(a_ )
model.eval()
lowercase_ : int = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : str , lowercase_ : Dict ):
lowercase_ : int = self.num_labels
lowercase_ : List[Any] = NezhaForTokenClassification(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : List[Any] = model(a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : int , lowercase_ : int , lowercase_ : Tuple , lowercase_ : Dict ):
lowercase_ : Union[str, Any] = self.num_choices
lowercase_ : Optional[Any] = NezhaForMultipleChoice(config=a_ )
model.to(a_ )
model.eval()
lowercase_ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase_ : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
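# each (batch, seq_len) tensor is repeated along a new dimension so the multiple-choice inputs become (batch, num_choices, seq_len)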
lowercase_ : int = model(
a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ : Tuple = config_and_inputs
lowercase_ : List[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __magic_name__ ( __a, __a, __a, unittest.TestCase):
UpperCamelCase__ = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase__ = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : List[str]=False ):
lowercase_ : Any = super()._prepare_for_class(a_ , a_ , return_labels=a_ )
if return_labels:
if model_class in get_values(a_ ):
lowercase_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=a_ )
lowercase_ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : int = NezhaModelTester(self )
lowercase_ : List[Any] = ConfigTester(self , config_class=a_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
lowercase_ : List[Any] = None
self.model_tester.create_and_check_model_as_decoder(
a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , a_ , )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[Any] = NezhaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
lowercase_ : str = True
lowercase_ : Tuple = model_class(config=a_ )
lowercase_ : Optional[int] = self._prepare_for_class(a_ , a_ )
lowercase_ : Optional[Any] = torch.jit.trace(
a_ , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(a_ , os.path.join(a_ , """bert.pt""" ) )
lowercase_ : List[str] = torch.jit.load(os.path.join(a_ , """bert.pt""" ) , map_location=a_ )
loaded(inputs_dict["""input_ids"""].to(a_ ) , inputs_dict["""attention_mask"""].to(a_ ) )
@require_torch
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
lowercase_ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : List[Any] = model(a_ , attention_mask=a_ )[0]
lowercase_ : str = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , a_ )
lowercase_ : List[Any] = torch.tensor([[[0.06_85, 0.24_41, 0.11_02], [0.06_00, 0.19_06, 0.13_49], [0.02_21, 0.08_19, 0.05_86]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Tuple = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
lowercase_ : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
lowercase_ : int = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowercase_ : Any = model(a_ , attention_mask=a_ )[0]
lowercase_ : Optional[int] = torch.Size((1, 6, 21128) )
self.assertEqual(output.shape , a_ )
lowercase_ : int = torch.tensor(
[[-2.79_39, -1.79_02, -2.21_89], [-2.85_85, -1.89_08, -2.37_23], [-2.64_99, -1.77_50, -2.25_58]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , a_ , atol=1E-4 ) )
| 706
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
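# pairs consecutive hidden sizes ((h0, h1), (h1, h2), ...) so each stage's input width matches the previous stage's output width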
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 0
|
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : str = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''efficientformer'''
def __init__( self : Optional[int] , lowercase_ : Union[str, Any] = [3, 2, 6, 4] , lowercase_ : Tuple = [48, 96, 224, 448] , lowercase_ : int = [True, True, True, True] , lowercase_ : str = 448 , lowercase_ : Optional[int] = 32 , lowercase_ : Tuple = 4 , lowercase_ : Optional[int] = 7 , lowercase_ : Union[str, Any] = 5 , lowercase_ : str = 8 , lowercase_ : Dict = 4 , lowercase_ : Tuple = 0.0 , lowercase_ : Optional[int] = 16 , lowercase_ : Any = 3 , lowercase_ : Optional[Any] = 3 , lowercase_ : int = 3 , lowercase_ : Optional[Any] = 2 , lowercase_ : Union[str, Any] = 1 , lowercase_ : Dict = 0.0 , lowercase_ : int = 1 , lowercase_ : List[str] = True , lowercase_ : Optional[int] = True , lowercase_ : List[Any] = 1E-5 , lowercase_ : Optional[Any] = "gelu" , lowercase_ : int = 0.02 , lowercase_ : int = 1E-12 , lowercase_ : Union[str, Any] = 224 , lowercase_ : Optional[int] = 1E-05 , **lowercase_ : str , ):
super().__init__(**__lowerCAmelCase )
lowercase_ : Tuple = hidden_act
lowercase_ : Dict = hidden_dropout_prob
lowercase_ : Tuple = hidden_sizes
lowercase_ : Union[str, Any] = num_hidden_layers
lowercase_ : int = num_attention_heads
lowercase_ : Dict = initializer_range
lowercase_ : Union[str, Any] = layer_norm_eps
lowercase_ : Optional[Any] = patch_size
lowercase_ : Dict = num_channels
lowercase_ : Tuple = depths
lowercase_ : List[str] = mlp_expansion_ratio
lowercase_ : Optional[int] = downsamples
lowercase_ : Optional[Any] = dim
lowercase_ : Optional[int] = key_dim
lowercase_ : List[str] = attention_ratio
lowercase_ : int = resolution
lowercase_ : Any = pool_size
lowercase_ : Union[str, Any] = downsample_patch_size
lowercase_ : Dict = downsample_stride
lowercase_ : Optional[Any] = downsample_pad
lowercase_ : Any = drop_path_rate
lowercase_ : Optional[Any] = num_meta3d_blocks
lowercase_ : Optional[int] = distillation
lowercase_ : Optional[int] = use_layer_scale
lowercase_ : List[Any] = layer_scale_init_value
lowercase_ : str = image_size
lowercase_ : Union[str, Any] = batch_norm_eps
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[str] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/config.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/config.json''',
}
class __magic_name__ ( a__):
UpperCamelCase__ = """xlnet"""
UpperCamelCase__ = ["""mems"""]
UpperCamelCase__ = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str , lowercase_ : Dict=32000 , lowercase_ : List[str]=1024 , lowercase_ : Any=24 , lowercase_ : List[str]=16 , lowercase_ : Optional[Any]=4096 , lowercase_ : List[Any]="gelu" , lowercase_ : int=True , lowercase_ : List[str]="bi" , lowercase_ : Union[str, Any]=0.02 , lowercase_ : List[str]=1E-12 , lowercase_ : int=0.1 , lowercase_ : str=512 , lowercase_ : Dict=None , lowercase_ : Union[str, Any]=True , lowercase_ : Any=False , lowercase_ : int=False , lowercase_ : List[str]=-1 , lowercase_ : Optional[int]=False , lowercase_ : Dict="last" , lowercase_ : Dict=True , lowercase_ : str="tanh" , lowercase_ : Union[str, Any]=0.1 , lowercase_ : List[str]=5 , lowercase_ : List[str]=5 , lowercase_ : Any=5 , lowercase_ : Dict=1 , lowercase_ : List[str]=2 , **lowercase_ : List[str] , ):
lowercase_ : Any = vocab_size
lowercase_ : Tuple = d_model
lowercase_ : Tuple = n_layer
lowercase_ : Tuple = n_head
if d_model % n_head != 0:
raise ValueError(f'''\'d_model % n_head\' ({d_model % n_head}) should be equal to 0''' )
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f'''`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})''' )
lowercase_ : Tuple = d_model // n_head
lowercase_ : Optional[Any] = ff_activation
lowercase_ : List[Any] = d_inner
lowercase_ : Any = untie_r
lowercase_ : str = attn_type
lowercase_ : List[Any] = initializer_range
lowercase_ : Any = layer_norm_eps
lowercase_ : List[Any] = dropout
lowercase_ : List[Any] = mem_len
lowercase_ : Optional[Any] = reuse_len
lowercase_ : List[str] = bi_data
lowercase_ : Dict = clamp_len
lowercase_ : Any = same_length
lowercase_ : int = summary_type
lowercase_ : str = summary_use_proj
lowercase_ : Tuple = summary_activation
lowercase_ : Tuple = summary_last_dropout
lowercase_ : List[Any] = start_n_top
lowercase_ : Tuple = end_n_top
lowercase_ : Dict = bos_token_id
lowercase_ : List[Any] = pad_token_id
lowercase_ : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"""The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"""
""" instead.""" , lowerCAmelCase__ , )
lowercase_ : Union[str, Any] = kwargs["use_cache"]
lowercase_ : Optional[Any] = use_mems_eval
lowercase_ : Dict = use_mems_train
super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Optional[Any] ):
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 708
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
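# A minimal sketch of the behaviour exercised above, assuming the documented semantics of
# `is_small_dataset`: it returns True only when a positive IN_MEMORY_MAX_SIZE is configured
# and the given dataset size is strictly below that limit.
#
# import datasets.config
# from datasets.utils.info_utils import is_small_dataset
#
# datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # 500 MiB budget
# print(is_small_dataset(400 * 2**20))  # True  (fits within the budget)
# print(is_small_dataset(600 * 2**20))  # False (exceeds the budget)
# print(is_small_dataset(None))         # False (unknown size is never "small")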
| 30
| 0
|
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class __magic_name__ ( UpperCAmelCase__):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str ):
with open(__lowerCAmelCase , encoding="""utf-8""" ) as input_file:
lowercase_ : Any = re.compile(r"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" )
lowercase_ : str = input_file.read()
lowercase_ : List[str] = regexp.search(__lowerCAmelCase )
return match
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str ):
with open(__lowerCAmelCase , encoding="""utf-8""" ) as input_file:
lowercase_ : str = re.compile(r"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL )
lowercase_ : Union[str, Any] = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
lowercase_ : Dict = regexp.finditer(__lowerCAmelCase )
lowercase_ : List[Any] = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[int] = Path("""./datasets""" )
lowercase_ : Tuple = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(__lowerCAmelCase ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[Any] = Path("""./datasets""" )
lowercase_ : List[Any] = list(dataset_paths.absolute().glob("""**/*.py""" ) )
for dataset in dataset_files:
if self._no_print_statements(str(__lowerCAmelCase ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 709
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeq2SeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 0
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> int:
lowercase_ : Tuple = args.pruning_method
lowercase_ : str = args.threshold
lowercase_ : Tuple = args.model_name_or_path.rstrip("""/""" )
lowercase_ : List[str] = args.target_model_path
print(F'''Load fine-pruned model from {model_name_or_path}''' )
lowercase_ : Tuple = torch.load(os.path.join(_snake_case , """pytorch_model.bin""" ) )
lowercase_ : List[str] = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowercase_ : str = tensor
print(F'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
lowercase_ : Optional[Any] = tensor
print(F'''Copied layer {name}''' )
elif "bias" in name:
lowercase_ : List[Any] = tensor
print(F'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
lowercase_ : List[str] = MagnitudeBinarizer.apply(inputs=_snake_case , threshold=_snake_case )
lowercase_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowercase_ : List[Any] = name[:-6]
lowercase_ : Any = model[F'''{prefix_}mask_scores''']
lowercase_ : List[Any] = TopKBinarizer.apply(_snake_case , _snake_case )
lowercase_ : int = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowercase_ : Tuple = name[:-6]
lowercase_ : Dict = model[F'''{prefix_}mask_scores''']
lowercase_ : Optional[Any] = ThresholdBinarizer.apply(_snake_case , _snake_case , _snake_case )
lowercase_ : List[str] = tensor * mask
print(F'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowercase_ : int = name[:-6]
lowercase_ : str = model[F'''{prefix_}mask_scores''']
lowercase_ , lowercase_ : List[Any] = -0.1, 1.1
lowercase_ : str = torch.sigmoid(_snake_case )
lowercase_ : Dict = s * (r - l) + l
lowercase_ : Optional[int] = s_bar.clamp(min=0.0 , max=1.0 )
lowercase_ : Dict = tensor * mask
print(F'''Pruned layer {name}''' )
else:
raise ValueError("""Unknown pruning method""" )
if target_model_path is None:
lowercase_ : List[Any] = os.path.join(
os.path.dirname(_snake_case ) , F'''bertarized_{os.path.basename(_snake_case )}''' )
if not os.path.isdir(_snake_case ):
shutil.copytree(_snake_case , _snake_case )
print(F'''\nCreated folder {target_model_path}''' )
torch.save(_snake_case , os.path.join(_snake_case , """pytorch_model.bin""" ) )
print("""\nPruned model saved! See you later!""" )
if __name__ == "__main__":
_lowercase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
_lowercase : int = parser.parse_args()
main(args)
| 710
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : Callable , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float , UpperCAmelCase__ : float ) -> np.array:
lowercase_ : Any = int(np.ceil((x_end - xa) / step_size ) )
lowercase_ : List[Any] = np.zeros((n + 1,) )
lowercase_ : List[Any] = ya
lowercase_ : List[str] = xa
for k in range(UpperCAmelCase__ ):
lowercase_ : Optional[Any] = y[k] + step_size * ode_func(UpperCAmelCase__ , y[k] )
lowercase_ : List[Any] = y[k] + (
(step_size / 2) * (ode_func(UpperCAmelCase__ , y[k] ) + ode_func(x + step_size , UpperCAmelCase__ ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
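# A minimal usage sketch for the Heun (explicit trapezoidal) integrator defined above,
# assuming the positional arguments follow the classic reference order
# (ode_func, y0, x0, step_size, x_end). For y' = y with y(0) = 1 integrated to x = 1,
# the last entry of the returned array should be close to e ≈ 2.71828.
#
# def growth(x, y):
#     return y  # dy/dx = y
#
# ys = lowerCamelCase(growth, 1.0, 0.0, 0.001, 1.0)  # y0=1.0, x0=0.0, h=0.001, x_end=1.0
# print(ys[-1])  # ≈ 2.7182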
| 30
| 0
|
'''simple docstring'''
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __magic_name__ :
@staticmethod
def SCREAMING_SNAKE_CASE_ ( *lowercase_ : Optional[int] , **lowercase_ : Dict ):
pass
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> str:
lowercase_ : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def lowerCamelCase ( UpperCAmelCase__ : str ) -> List[str]:
lowercase_ : Tuple = np.array(_lowercase )
lowercase_ : Any = npimg.shape
return {"hash": hashimage(_lowercase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class __magic_name__ ( unittest.TestCase):
UpperCamelCase__ = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
UpperCamelCase__ = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : int ):
lowercase_ : Any = MaskGenerationPipeline(model=UpperCamelCase__ , image_processor=UpperCamelCase__ )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any , lowercase_ : Optional[Any] ):
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
pass
@slow
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : int = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
lowercase_ : List[str] = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
lowercase_ : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase__ ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_21},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.00_53},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.99_67},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_93},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.99_09},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.98_79},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.98_34},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.97_16},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.96_12},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.95_99},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.95_52},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.95_32},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.95_16},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.94_99},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.94_83},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.94_64},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_43},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.94_08},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.93_35},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.93_26},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.92_62},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.89_99},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.89_86},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.89_84},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.88_73},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.88_71}
] , )
# fmt: on
@require_torch
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = '''facebook/sam-vit-huge'''
lowercase_ : Tuple = pipeline("""mask-generation""" , model=UpperCamelCase__ )
lowercase_ : Tuple = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
lowercase_ : Optional[Any] = []
for i, o in enumerate(outputs["""masks"""] ):
new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase__ ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCamelCase__ , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.04_44},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.02_10},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.01_67},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.01_32},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.00_53},
] , )
| 711
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
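# For reference, a sketch of the coefficients the low-pass design above computes, assuming
# the standard RBJ "Audio EQ Cookbook" biquad convention (a-coefficients first, then
# b-coefficients, as expected by IIRFilter.set_coefficients):
#
# w0 = tau * 1_000 / 48_000              # 1 kHz cutoff at a 48 kHz sample rate
# alpha = sin(w0) / (2 * (1 / sqrt(2)))  # Butterworth Q factor
# b0 = b2 = (1 - cos(w0)) / 2
# b1 = 1 - cos(w0)
# a0, a1, a2 = 1 + alpha, -2 * cos(w0), 1 - alpha
# filt = IIRFilter(2)
# filt.set_coefficients([a0, a1, a2], [b0, b1, b2])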
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 30
| 0
|
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
_lowercase : str = logging.getLogger()
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Dict = argparse.ArgumentParser()
parser.add_argument("""-f""" )
lowercase_ : Optional[Any] = parser.parse_args()
return args.f
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Tuple:
lowercase_ : str = {}
lowercase_ : Optional[int] = os.path.join(__UpperCamelCase , """all_results.json""" )
if os.path.exists(__UpperCamelCase ):
with open(__UpperCamelCase , """r""" ) as f:
lowercase_ : str = json.load(__UpperCamelCase )
else:
raise ValueError(F'''can\'t find {path}''' )
return results
def lowerCamelCase ( ) -> int:
lowercase_ : List[str] = torch.cuda.is_available() and torch_device == """cuda"""
return is_using_cuda and is_apex_available()
_lowercase : Optional[Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __magic_name__ ( _UpperCAmelCase):
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ):
lowercase_ : Optional[Any] = tempfile.mkdtemp()
lowercase_ : List[str] = os.path.join(cls.tmpdir , """default_config.yml""" )
write_basic_config(save_location=cls.configPath )
lowercase_ : Optional[int] = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Optional[Any] ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : str = self.get_auto_remove_tmp_dir()
lowercase_ : List[Any] = f'''\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowercase_ : int = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = self.get_auto_remove_tmp_dir()
lowercase_ : Optional[Any] = f'''\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
lowercase_ : Any = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 100 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase_ : str = f'''\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : Any = get_results(_SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 42 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = 7 if get_gpu_count() > 1 else 2
lowercase_ : List[str] = self.get_auto_remove_tmp_dir()
lowercase_ : Any = f'''\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : Optional[Any] = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = self.get_auto_remove_tmp_dir()
lowercase_ : Optional[Any] = f'''\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : Tuple = get_results(_SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 28 )
self.assertGreaterEqual(result["""eval_exact"""] , 28 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = self.get_auto_remove_tmp_dir()
lowercase_ : Optional[int] = f'''\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : str = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = self.get_auto_remove_tmp_dir()
lowercase_ : str = f'''\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : int = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 10 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Union[str, Any] = self.get_auto_remove_tmp_dir()
lowercase_ : List[Any] = f'''\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : Union[str, Any] = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 30 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : str = logging.StreamHandler(sys.stdout )
logger.addHandler(_SCREAMING_SNAKE_CASE )
lowercase_ : List[Any] = self.get_auto_remove_tmp_dir()
lowercase_ : Tuple = f'''\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '''.split()
run_command(self._launch_args + testargs )
lowercase_ : int = get_results(_SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = self.get_auto_remove_tmp_dir()
lowercase_ : Optional[Any] = f'''\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '''.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
lowercase_ : Optional[int] = get_results(_SCREAMING_SNAKE_CASE )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 712
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
| 30
| 0
|
'''simple docstring'''
from math import pi
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] ) -> List[str]:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
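# The call above evaluates to 2 * pi * 900 / 360 = 5 * pi ≈ 15.707963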
| 713
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class __magic_name__ ( lowercase__, lowercase__, unittest.TestCase):
UpperCamelCase__ = AutoencoderKL
UpperCamelCase__ = """sample"""
UpperCamelCase__ = 1e-2
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Optional[int] = 4
lowercase_ : Optional[Any] = 3
lowercase_ : Any = (32, 32)
lowercase_ : str = floats_tensor((batch_size, num_channels) + sizes ).to(__lowercase )
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return (3, 32, 32)
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return (3, 32, 32)
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[Any] = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
lowercase_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
# enable deterministic behavior for gradient checkpointing
lowercase_ , lowercase_ : Optional[Any] = self.prepare_init_args_and_inputs_for_common()
lowercase_ : List[str] = self.model_class(**__lowercase )
model.to(__lowercase )
assert not model.is_gradient_checkpointing and model.training
lowercase_ : str = model(**__lowercase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
lowercase_ : Dict = torch.randn_like(__lowercase )
lowercase_ : Dict = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
lowercase_ : List[str] = self.model_class(**__lowercase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__lowercase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
lowercase_ : Optional[int] = model_a(**__lowercase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
lowercase_ : str = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
lowercase_ : Dict = dict(model.named_parameters() )
lowercase_ : List[str] = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ , lowercase_ : Optional[int] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__lowercase )
lowercase_ : str = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
lowercase_ : str = model.to(__lowercase )
model.eval()
if torch_device == "mps":
lowercase_ : Any = torch.manual_seed(0 )
else:
lowercase_ : str = torch.Generator(device=__lowercase ).manual_seed(0 )
lowercase_ : Dict = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
lowercase_ : Optional[Any] = image.to(__lowercase )
with torch.no_grad():
lowercase_ : str = model(__lowercase , sample_posterior=__lowercase , generator=__lowercase ).sample
lowercase_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
lowercase_ : Tuple = torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
lowercase_ : Tuple = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26] )
else:
lowercase_ : Union[str, Any] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85] )
self.assertTrue(torch_all_close(__lowercase , __lowercase , rtol=1E-2 ) )
@slow
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Dict , seed : int , shape : str ):
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Optional[Any]=0 , lowercase_ : List[str]=(4, 3, 512, 512) , lowercase_ : List[Any]=False ):
lowercase_ : Dict = torch.floataa if fpaa else torch.floataa
lowercase_ : Optional[int] = torch.from_numpy(load_hf_numpy(self.get_file_format(__lowercase , __lowercase ) ) ).to(__lowercase ).to(__lowercase )
return image
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any="CompVis/stable-diffusion-v1-4" , lowercase_ : List[Any]=False ):
lowercase_ : Optional[int] = """fp16""" if fpaa else None
lowercase_ : Dict = torch.floataa if fpaa else torch.floataa
lowercase_ : int = AutoencoderKL.from_pretrained(
__lowercase , subfolder="""vae""" , torch_dtype=__lowercase , revision=__lowercase , )
model.to(__lowercase ).eval()
return model
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[int]=0 ):
if torch_device == "mps":
return torch.manual_seed(__lowercase )
return torch.Generator(device=__lowercase ).manual_seed(__lowercase )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : List[Any] ):
lowercase_ : Optional[Any] = self.get_sd_vae_model()
lowercase_ : List[Any] = self.get_sd_image(__lowercase )
lowercase_ : Union[str, Any] = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ : Dict = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase_ : Optional[Any] = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ : Optional[int] = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[int] , lowercase_ : Tuple ):
lowercase_ : int = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ : Optional[int] = self.get_sd_image(__lowercase , fpaa=__lowercase )
lowercase_ : Union[str, Any] = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ : List[Any] = model(__lowercase , generator=__lowercase , sample_posterior=__lowercase ).sample
assert sample.shape == image.shape
lowercase_ : Tuple = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ : Tuple = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ):
lowercase_ : int = self.get_sd_vae_model()
lowercase_ : Optional[Any] = self.get_sd_image(__lowercase )
with torch.no_grad():
lowercase_ : Tuple = model(__lowercase ).sample
assert sample.shape == image.shape
lowercase_ : Any = sample[-1, -2:, -2:, :2].flatten().float().cpu()
lowercase_ : Any = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__lowercase , __lowercase , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple ):
lowercase_ : List[Any] = self.get_sd_vae_model()
lowercase_ : List[str] = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ : str = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
lowercase_ : Union[str, Any] = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any ):
lowercase_ : Optional[Any] = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ : int = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
with torch.no_grad():
lowercase_ : Optional[int] = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
lowercase_ : Dict = sample[-1, -2:, :2, -2:].flatten().float().cpu()
lowercase_ : int = torch.tensor(__lowercase )
assert torch_all_close(__lowercase , __lowercase , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Union[str, Any] ):
lowercase_ : Union[str, Any] = self.get_sd_vae_model(fpaa=__lowercase )
lowercase_ : Any = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) , fpaa=__lowercase )
with torch.no_grad():
lowercase_ : Optional[Any] = model.decode(__lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ : int = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__lowercase , __lowercase , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any ):
lowercase_ : Union[str, Any] = self.get_sd_vae_model()
lowercase_ : Optional[Any] = self.get_sd_image(__lowercase , shape=(3, 4, 64, 64) )
with torch.no_grad():
lowercase_ : Any = model.decode(__lowercase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
lowercase_ : Any = model.decode(__lowercase ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(__lowercase , __lowercase , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ):
lowercase_ : Tuple = self.get_sd_vae_model()
lowercase_ : Dict = self.get_sd_image(__lowercase )
lowercase_ : Any = self.get_generator(__lowercase )
with torch.no_grad():
lowercase_ : Dict = model.encode(__lowercase ).latent_dist
lowercase_ : Any = dist.sample(generator=__lowercase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
lowercase_ : Dict = sample[0, -1, -3:, -3:].flatten().cpu()
lowercase_ : int = torch.tensor(__lowercase )
lowercase_ : Tuple = 3E-3 if torch_device != """mps""" else 1E-2
assert torch_all_close(__lowercase , __lowercase , atol=__lowercase )
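# Note: the encode test above first checks that the posterior sample keeps the VAE's
# 8x spatial downsampling (an (N, C, H, W) image maps to latents of shape
# (N, 4, H // 8, W // 8)) and only then compares a small slice of the values.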
| 714
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( ) -> list[list[int]]:
return [list(range(1000 - i , -1000 - i , -1 ) ) for i in range(1000 )]
_lowercase : Optional[Any] = generate_large_matrix()
_lowercase : Dict = (
[[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
[[3, 2], [1, 0]],
[[7, 7, 6]],
[[7, 7, 6], [-1, -2, -3]],
grid,
)
def lowerCamelCase ( grid : list[list[int]] ) -> None:
assert all(row == sorted(row , reverse=True ) for row in grid )
assert all(list(col ) == sorted(col , reverse=True ) for col in zip(*grid ) )
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> int:
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = len(_lowerCAmelCase ) - 1
# Edge cases such as no values or all numbers are negative.
if not array or array[0] < 0:
return 0
while right + 1 > left:
lowercase_ : int = (left + right) // 2
lowercase_ : List[Any] = array[mid]
# Num must be negative and the index must be greater than or equal to 0.
if num < 0 and array[mid - 1] >= 0:
return mid
if num >= 0:
lowercase_ : Optional[int] = mid + 1
else:
lowercase_ : Union[str, Any] = mid - 1
# No negative numbers so return the last index of the array + 1 which is the length.
return len(_lowerCAmelCase )
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> int:
lowercase_ : List[Any] = 0
lowercase_ : Any = len(grid[0] )
for i in range(len(_lowerCAmelCase ) ):
lowercase_ : Union[str, Any] = find_negative_index(grid[i][:bound] )
total += bound
return (len(_lowerCAmelCase ) * len(grid[0] )) - total
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> int:
return len([number for row in grid for number in row if number < 0] )
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> int:
lowercase_ : List[str] = 0
for row in grid:
for i, number in enumerate(_lowerCAmelCase ):
if number < 0:
total += len(_lowerCAmelCase ) - i
break
return total
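# Informal complexity note: the binary-search variant above does O(log cols) work per
# row (and its search window only shrinks from row to row), while both brute-force
# variants scan up to O(cols) entries per row; the benchmark below makes that gap visible.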
def lowerCamelCase ( ) -> None:
from timeit import timeit
print("""Running benchmarks""" )
lowercase_ : Dict = (
"from __main__ import count_negatives_binary_search, "
"count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
)
for func in (
"count_negatives_binary_search", # took 0.7727 seconds
"count_negatives_brute_force_with_break", # took 4.6505 seconds
"count_negatives_brute_force", # took 12.8160 seconds
):
lowercase_ : List[str] = timeit(F'''{func}(grid=grid)''' , setup=_lowerCAmelCase , number=500 )
print(F'''{func}() took {time:0.4f} seconds''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 715
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 0
|
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[Any] = {
"""repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""],
"""path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""],
"""content""": ["""a """ * 20, """a """ * 30, """b """ * 7],
}
lowercase_ : Optional[int] = Dataset.from_dict(UpperCAmelCase__ )
return dataset
class __magic_name__ ( TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = get_dataset()
lowercase_ : str = make_duplicate_clusters(lowercase_ , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = get_dataset()
lowercase_ , lowercase_ : Union[str, Any] = deduplicate_dataset(lowercase_ )
self.assertEqual(len(lowercase_ ) , 2 )
print(lowercase_ )
self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 )
self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , lowercase_ )
| 716
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class __magic_name__ ( TaskTemplate):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> List[Any]:
lowercase_ : Dict = [0] * len(UpperCAmelCase__ )
for i in range(1 , len(UpperCAmelCase__ ) ):
# use last results for better performance - dynamic programming
lowercase_ : Optional[Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
lowercase_ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
lowercase_ : Dict = j
return prefix_result
def lowerCamelCase ( UpperCAmelCase__ : List[str] ) -> Optional[int]:
return max(prefix_function(UpperCAmelCase__ ) )
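# Hand-worked example (not from the source): for "aabcdaabc" the prefix function is
# [0, 1, 0, 0, 0, 1, 2, 3, 4], so the longest border (a proper prefix that is also a
# suffix of some prefix) has length 4, namely "aabc".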
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
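# Note: the loop above keeps growing the requested result count by a factor of 1.5
# until the retrieval client returns enough candidates (or the request exceeds 10k
# images), then downloads until `num_class_images` files have been written to disk.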
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 0
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __magic_name__ ( PretrainedConfig):
UpperCamelCase__ = "mctct"
def __init__( self : Tuple , lowercase_ : Optional[int]=8065 , lowercase_ : int=1536 , lowercase_ : Optional[Any]=36 , lowercase_ : Optional[int]=6144 , lowercase_ : Optional[Any]=4 , lowercase_ : int=384 , lowercase_ : Union[str, Any]=920 , lowercase_ : Any=1E-5 , lowercase_ : int=0.3 , lowercase_ : Any="relu" , lowercase_ : List[Any]=0.02 , lowercase_ : int=0.3 , lowercase_ : Any=0.3 , lowercase_ : Optional[Any]=1 , lowercase_ : List[Any]=0 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=1 , lowercase_ : Tuple=0.3 , lowercase_ : List[str]=1 , lowercase_ : Optional[Any]=(7,) , lowercase_ : Optional[int]=(3,) , lowercase_ : str=80 , lowercase_ : int=1 , lowercase_ : Any=None , lowercase_ : Optional[Any]="sum" , lowercase_ : Optional[int]=False , **lowercase_ : Any , ):
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
lowercase_ : Tuple = vocab_size
lowercase_ : Tuple = hidden_size
lowercase_ : Optional[Any] = num_hidden_layers
lowercase_ : List[Any] = intermediate_size
lowercase_ : Union[str, Any] = num_attention_heads
lowercase_ : List[str] = attention_head_dim
lowercase_ : Optional[Any] = max_position_embeddings
lowercase_ : Optional[int] = layer_norm_eps
lowercase_ : Any = layerdrop
lowercase_ : Tuple = hidden_act
lowercase_ : List[str] = initializer_range
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Dict = pad_token_id
lowercase_ : List[str] = bos_token_id
lowercase_ : Dict = eos_token_id
lowercase_ : Optional[int] = conv_glu_dim
lowercase_ : int = conv_dropout
lowercase_ : Dict = num_conv_layers
lowercase_ : List[str] = input_feat_per_channel
lowercase_ : int = input_channels
lowercase_ : Optional[Any] = conv_channels
lowercase_ : List[str] = ctc_loss_reduction
lowercase_ : int = ctc_zero_infinity
# prevents config testing fail with exporting to json
lowercase_ : Union[str, Any] = list(lowerCAmelCase_ )
lowercase_ : List[Any] = list(lowerCAmelCase_ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 718
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( UpperCAmelCase__ : list , UpperCAmelCase__ : int | None = None , UpperCAmelCase__ : int | None = None ) -> None:
if start is None:
lowercase_ : Any = 0
if end is None:
lowercase_ : List[Any] = len(UpperCAmelCase__ ) - 1
if start >= end:
return
lowercase_ : Optional[int] = (start + end) // 2
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
slowsort(UpperCAmelCase__ , mid + 1 , UpperCAmelCase__ )
if sequence[end] < sequence[mid]:
lowercase_ , lowercase_ : Dict = sequence[mid], sequence[end]
slowsort(UpperCAmelCase__ , UpperCAmelCase__ , end - 1 )
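# Usage sketch (illustrative): slowsort sorts the list in place, ascending, by
# recursively moving the maximum of each half toward the end of the range.
#     data = [5, 2, 4, 1, 3]
#     slowsort(data)
#     # data == [1, 2, 3, 4, 5]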
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 0
|
'''simple docstring'''
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def lowerCamelCase ( *UpperCAmelCase__ : Any ) -> Tuple:
# lock this script file itself so prints from different ranks do not interleave
with open(__file__ , """r""" ) as fh:
fcntl.flock(fh , fcntl.LOCK_EX )
try:
print(*UpperCAmelCase__ )
finally:
fcntl.flock(fh , fcntl.LOCK_UN )
_lowercase : Union[str, Any] = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
_lowercase : Tuple = torch.device("cuda", local_rank)
_lowercase : int = socket.gethostname()
_lowercase : Tuple = f"""[{hostname}-{local_rank}]"""
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
_lowercase : Union[str, Any] = dist.get_rank()
_lowercase : str = dist.get_world_size()
printflock(f"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(f"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(f"""{gpu} is broken""")
raise
| 719
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 0
|
'''simple docstring'''
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowercase : int = logging.get_logger(__name__)
class __magic_name__ ( enum.Enum):
UpperCamelCase__ = 0
UpperCamelCase__ = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __magic_name__ ( Pipeline):
UpperCamelCase__ = '''generated'''
def __init__( self : str , *lowercase_ : Union[str, Any] , **lowercase_ : Optional[int] ):
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , lowercase_ : int=None , lowercase_ : Dict=None , lowercase_ : List[str]=None , lowercase_ : str=None , **lowercase_ : List[str] , ):
lowercase_ : Optional[Any] = {}
if truncation is not None:
lowercase_ : Optional[Any] = truncation
lowercase_ : Dict = generate_kwargs
lowercase_ : Tuple = {}
if return_tensors is not None and return_type is None:
lowercase_ : str = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
lowercase_ : Dict = return_type
if clean_up_tokenization_spaces is not None:
lowercase_ : Union[str, Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase_ : List[Any] = self.tokenizer.encode(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
lowercase_ : Union[str, Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
return True
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , *lowercase_ : Dict , lowercase_ : Tuple ):
lowercase_ : int = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , UpperCAmelCase__ ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
lowercase_ : List[str] = ([prefix + arg for arg in args[0]],)
lowercase_ : str = True
elif isinstance(args[0] , UpperCAmelCase__ ):
lowercase_ : List[str] = (prefix + args[0],)
lowercase_ : List[Any] = False
else:
raise ValueError(
f''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
lowercase_ : Optional[int] = self.tokenizer(*UpperCAmelCase__ , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Optional[int] , *lowercase_ : Dict , **lowercase_ : Optional[Any] ):
lowercase_ : List[str] = super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
if (
isinstance(args[0] , UpperCAmelCase__ )
and all(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) for el in args[0] )
and all(len(UpperCAmelCase__ ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=TruncationStrategy.DO_NOT_TRUNCATE , **lowercase_ : List[str] ):
lowercase_ : Dict = self._parse_and_tokenize(UpperCAmelCase__ , truncation=UpperCAmelCase__ , **UpperCAmelCase__ )
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Dict , **lowercase_ : Any ):
if self.framework == "pt":
lowercase_ , lowercase_ : int = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
lowercase_ , lowercase_ : Dict = tf.shape(model_inputs["""input_ids"""] ).numpy()
lowercase_ : Optional[int] = generate_kwargs.get("""min_length""" , self.model.config.min_length )
lowercase_ : Optional[Any] = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(UpperCAmelCase__ , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
lowercase_ : Optional[int] = self.model.generate(**UpperCAmelCase__ , **UpperCAmelCase__ )
lowercase_ : Any = output_ids.shape[0]
if self.framework == "pt":
lowercase_ : Union[str, Any] = output_ids.reshape(UpperCAmelCase__ , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
lowercase_ : str = tf.reshape(UpperCAmelCase__ , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] , lowercase_ : Optional[int]=ReturnType.TEXT , lowercase_ : Dict=False ):
lowercase_ : Optional[Any] = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
lowercase_ : str = {f'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
lowercase_ : Optional[int] = {
f'''{self.return_name}_text''': self.tokenizer.decode(
UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ , clean_up_tokenization_spaces=UpperCAmelCase__ , )
}
records.append(UpperCAmelCase__ )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __magic_name__ ( __magic_name__): # extends the text2text generation pipeline defined above
UpperCamelCase__ = '''summary'''
def __call__( self : Union[str, Any] , *lowercase_ : Optional[int] , **lowercase_ : Union[str, Any] ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
if max_length < min_length:
logger.warning(f'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
f'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class __magic_name__ ( __magic_name__): # extends the text2text generation pipeline defined above
UpperCamelCase__ = '''translation'''
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
if input_length > 0.9 * max_length:
logger.warning(
f'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"""increasing your max_length manually, e.g. translator(\'...\', max_length=400)""" )
return True
def SCREAMING_SNAKE_CASE_ ( self : int , *lowercase_ : Optional[int] , lowercase_ : Any=TruncationStrategy.DO_NOT_TRUNCATE , lowercase_ : Optional[Any]=None , lowercase_ : List[Any]=None ):
if getattr(self.tokenizer , """_build_translation_inputs""" , UpperCAmelCase__ ):
return self.tokenizer._build_translation_inputs(
*UpperCAmelCase__ , return_tensors=self.framework , truncation=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ )
else:
return super()._parse_and_tokenize(*UpperCAmelCase__ , truncation=UpperCAmelCase__ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Dict=None , lowercase_ : Optional[int]=None , **lowercase_ : int ):
lowercase_ , lowercase_ , lowercase_ : Any = super()._sanitize_parameters(**UpperCAmelCase__ )
if src_lang is not None:
lowercase_ : List[str] = src_lang
if tgt_lang is not None:
lowercase_ : List[str] = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
lowercase_ : int = kwargs.get("""task""" , self.task )
lowercase_ : List[Any] = task.split("""_""" )
if task and len(UpperCAmelCase__ ) == 4:
# translation, XX, to YY
lowercase_ : Union[str, Any] = items[1]
lowercase_ : Any = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *lowercase_ : List[str] , **lowercase_ : Any ):
return super().__call__(*UpperCAmelCase__ , **UpperCAmelCase__ )
| 720
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
_lowercase : Optional[int] = "\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
_lowercase : Tuple = "\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
_lowercase : Optional[Any] = "\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , predictions : List[List[str]] , references : List[List[List[str]]] , min_len : int = 1 , max_len : int = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
}
}
| 721
|
'''simple docstring'''
import unittest
import numpy as np
def lowerCamelCase ( UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : np.ndarray | None = None , ) -> np.ndarray:
lowercase_ : List[Any] = np.shape(UpperCAmelCase__ )
lowercase_ : Dict = np.shape(UpperCAmelCase__ )
lowercase_ : int = np.shape(UpperCAmelCase__ )
if shape_a[0] != shape_b[0]:
lowercase_ : Optional[int] = (
"""Expected the same number of rows for A and B. """
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(UpperCAmelCase__ )
if shape_b[1] != shape_c[1]:
lowercase_ : Optional[Any] = (
"""Expected the same number of columns for B and C. """
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(UpperCAmelCase__ )
lowercase_ : Any = pseudo_inv
if a_inv is None:
try:
lowercase_ : List[str] = np.linalg.inv(UpperCAmelCase__ )
except np.linalg.LinAlgError:
raise ValueError(
"""Input matrix A is not invertible. Cannot compute Schur complement.""" )
return mat_c - mat_b.T @ a_inv @ mat_b
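# Note: the first unit test below relies on the block-determinant identity
# det([[A, B], [B.T, C]]) == det(A) * det(C - B.T @ inv(A) @ B),
# the standard Schur complement fact, which holds whenever A is invertible.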
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( num : int ) -> bool:
if num < 0:
return False
num_copy : int = num
rev_num : int = 0
while num > 0:
rev_num = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
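# Hand-checked examples: 121 -> True, -121 -> False (negative input is rejected up
# front), and 10 -> False because its reversal, 1, drops the trailing zero.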
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
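# Round-trip sketch (the payload is illustrative; readable aliases for the two helpers
# above, which share one placeholder name in this file, are assumed):
#     encoded = base64_encode(b"""Hello""" )   # b'SGVsbG8='
#     assert base64_decode(encoded ) == b"""Hello"""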
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
import math
import sys
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
if number != int(lowercase__ ):
raise ValueError("""the value of input must be a natural number""" )
if number < 0:
raise ValueError("""the value of input must not be a negative number""" )
if number == 0:
return 1
lowercase_ : Any = [-1] * (number + 1)
lowercase_ : List[str] = 0
for i in range(1 , number + 1 ):
lowercase_ : int = sys.maxsize
lowercase_ : str = int(math.sqrt(lowercase__ ) )
for j in range(1 , root + 1 ):
lowercase_ : Optional[Any] = 1 + answers[i - (j**2)]
lowercase_ : Optional[int] = min(lowercase__ , lowercase__ )
lowercase_ : List[str] = answer
return answers[number]
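# Hand-checked examples for this minimum-perfect-squares DP: 12 -> 3 (4 + 4 + 4) and
# 13 -> 2 (4 + 9); note the early return treats 0 as needing a single term.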
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 0
|
'''simple docstring'''
import torch
def lowerCamelCase ( ) -> str:
if torch.cuda.is_available():
lowercase_ : Tuple = torch.cuda.device_count()
else:
lowercase_ : List[str] = 0
print(F'''Successfully ran on {num_gpus} GPUs''' )
if __name__ == "__main__":
main()
| 702
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 0
|
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def pytest_configure ( config ):
config.addinivalue_line(
"""markers""" , """is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested""" )
config.addinivalue_line(
"""markers""" , """is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested""" )
config.addinivalue_line("""markers""" , """is_pipeline_test: mark test to run only when pipelines are tested""" )
config.addinivalue_line("""markers""" , """is_staging_test: mark test to run only in the staging environment""" )
config.addinivalue_line("""markers""" , """accelerate_tests: mark test that require accelerate""" )
config.addinivalue_line("""markers""" , """tool_tests: mark the tool tests that are run on their specific schedule""" )
def pytest_addoption ( parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("""--make-reports""" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish ( session , exitstatus ):
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker ( OutputChecker ):
    def check_output ( self , want , got , optionflags ):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
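# Hedged usage sketch: with the checker registered above, a doctest can opt out
# of output comparison via the custom flag, e.g.
#     >>> print(some_nondeterministic_value)  # doctest: +IGNORE_RESULT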
| 703
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 0
|
'''simple docstring'''
def solution ( n: int = 100 ) -> int:
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2 , n ):
        for b in range(2 , n ):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow )  # adds the result to the set
    return len(collect_powers )
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 704
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
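# Hedged note on the validation above: `conv_kernel_sizes` must contain exactly
# `num_conv_layers` entries, e.g. the defaults pair num_conv_layers=2 with the
# two kernel sizes (5, 5).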
| 30
| 0
|
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_lowercase : List[str] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close ( source: float , target: float ) -> bool:
    return (abs(source - target ) / target) < 0.01
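# e.g. is_apercent_close(99.5, 100) is True, since the relative gap is 0.5% < 1%.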
@pytest.mark.integration
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Dict:
    args = _TestCommandArgs(dataset=UpperCAmelCase__ , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(UpperCAmelCase__ , """README.md""" )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(UpperCAmelCase__ )
    expected_dataset_infos = DatasetInfosDict(
{
"""default""": DatasetInfo(
features=Features(
{
"""tokens""": Sequence(Value("""string""" ) ),
"""ner_tags""": Sequence(
ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ),
"""langs""": Sequence(Value("""string""" ) ),
"""spans""": Sequence(Value("""string""" ) ),
} ) , splits=[
{
"""name""": """train""",
"""num_bytes""": 2351563,
"""num_examples""": 10000,
},
{
"""name""": """validation""",
"""num_bytes""": 238418,
"""num_examples""": 1000,
},
] , download_size=3940680 , dataset_size=2589981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["""default"""] , key ), getattr(expected_dataset_infos["""default"""] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 705
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 0
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 706
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
| 30
| 0
|
from __future__ import annotations
def encode ( plain: str ) -> list[int]:
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded: list[int] ) -> str:
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ) -> None:
    encoded = encode(input("""-> """ ).strip().lower() )
    print("""Encoded: """ , encoded )
    print("""Decoded:""" , decode(encoded ) )
if __name__ == "__main__":
main()
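# Hedged usage sketch: encode("hello") == [8, 5, 12, 12, 15] and
# decode([8, 5, 12, 12, 15]) == "hello" (letters map to 1-26 and back).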
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar ( num_diffusion_timesteps: int , max_beta: float = 0.999 , alpha_transform_type: str = "cosine" , ) -> torch.Tensor:
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -12.0 )
    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
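# Hedged usage sketch: betas_for_alpha_bar(1000) returns a 1-D tensor of 1000
# betas derived from the cosine alpha-bar schedule, each clipped to max_beta.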
class __magic_name__ ( _lowerCAmelCase, _lowerCAmelCase):
UpperCamelCase__ = [e.name for e in KarrasDiffusionSchedulers]
UpperCamelCase__ = 2
@register_to_config
def __init__( self : List[str] , lowercase_ : int = 1000 , lowercase_ : float = 0.0_00_85 , lowercase_ : float = 0.0_12 , lowercase_ : str = "linear" , lowercase_ : Optional[Union[np.ndarray, List[float]]] = None , lowercase_ : str = "epsilon" , lowercase_ : Optional[bool] = False , lowercase_ : Optional[bool] = False , lowercase_ : float = 1.0 , lowercase_ : str = "linspace" , lowercase_ : int = 0 , ):
if trained_betas is not None:
lowercase_ : Optional[Any] = torch.tensor(_lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
lowercase_ : Union[str, Any] = torch.linspace(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase_ : int = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCAmelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase_ : Optional[int] = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""cosine""" )
elif beta_schedule == "exp":
lowercase_ : List[Any] = betas_for_alpha_bar(_lowerCAmelCase , alpha_transform_type="""exp""" )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
lowercase_ : Any = 1.0 - self.betas
lowercase_ : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase_ : Dict = use_karras_sigmas
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any]=None ):
if schedule_timesteps is None:
lowercase_ : Dict = self.timesteps
lowercase_ : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
lowercase_ : str = 1 if len(_lowerCAmelCase ) > 1 else 0
else:
lowercase_ : List[Any] = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
lowercase_ : List[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : torch.FloatTensor , lowercase_ : Union[float, torch.FloatTensor] , ):
lowercase_ : int = self.index_for_timestep(_lowerCAmelCase )
lowercase_ : Union[str, Any] = self.sigmas[step_index]
lowercase_ : Union[str, Any] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : int , lowercase_ : Union[str, torch.device] = None , lowercase_ : Optional[int] = None , ):
lowercase_ : str = num_inference_steps
lowercase_ : Optional[int] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
lowercase_ : Optional[Any] = np.linspace(0 , num_train_timesteps - 1 , _lowerCAmelCase , dtype=_lowerCAmelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
lowercase_ : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase_ : int = (np.arange(0 , _lowerCAmelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCAmelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
lowercase_ : str = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
lowercase_ : Union[str, Any] = (np.arange(_lowerCAmelCase , 0 , -step_ratio )).round().copy().astype(_lowerCAmelCase )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
lowercase_ : str = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
lowercase_ : Dict = np.log(_lowerCAmelCase )
lowercase_ : Optional[Any] = np.interp(_lowerCAmelCase , np.arange(0 , len(_lowerCAmelCase ) ) , _lowerCAmelCase )
if self.config.use_karras_sigmas:
lowercase_ : Any = self._convert_to_karras(in_sigmas=_lowerCAmelCase , num_inference_steps=self.num_inference_steps )
lowercase_ : str = np.array([self._sigma_to_t(_lowerCAmelCase , _lowerCAmelCase ) for sigma in sigmas] )
lowercase_ : str = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
lowercase_ : List[str] = torch.from_numpy(_lowerCAmelCase ).to(device=_lowerCAmelCase )
lowercase_ : int = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
lowercase_ : int = torch.from_numpy(_lowerCAmelCase )
lowercase_ : Dict = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCAmelCase ).startswith("""mps""" ):
# mps does not support float64
lowercase_ : int = timesteps.to(_lowerCAmelCase , dtype=torch.floataa )
else:
lowercase_ : Union[str, Any] = timesteps.to(device=_lowerCAmelCase )
# empty dt and derivative
lowercase_ : Optional[Any] = None
lowercase_ : Union[str, Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
lowercase_ : Union[str, Any] = defaultdict(_lowerCAmelCase )
    def _sigma_to_t(self, sigma, log_sigmas):
        log_sigma = np.log(sigma)
        # distance from every training log-sigma to the query log-sigma
        dists = log_sigma - log_sigmas[:, np.newaxis]
        # index of the last training sigma that is >= the query sigma
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1
        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]
        # interpolate between the two bracketing log-sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)
        # map the interpolation weight back to a (fractional) timestep
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps: int):
        """Construct the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()
        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas
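    # Worked example of the schedule above: with sigma_max ≈ 14.6 and sigma_min ≈ 0.03
    # (typical Stable Diffusion values) and num_inference_steps = 5, interpolating in
    # sigma**(1/rho) space gives roughly [14.6, 4.8, 1.3, 0.25, 0.03] — densely spaced
    # near sigma_min and coarsely spaced near sigma_max.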
@property
def SCREAMING_SNAKE_CASE_ ( self : Any ):
return self.dt is None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[torch.FloatTensor, np.ndarray] , lowercase_ : Union[float, torch.FloatTensor] , lowercase_ : Union[torch.FloatTensor, np.ndarray] , lowercase_ : bool = True , ):
lowercase_ : Any = self.index_for_timestep(_lowerCAmelCase )
# advance index counter by 1
lowercase_ : List[str] = timestep.cpu().item() if torch.is_tensor(_lowerCAmelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
lowercase_ : str = self.sigmas[step_index]
lowercase_ : List[str] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
lowercase_ : Any = self.sigmas[step_index - 1]
lowercase_ : List[str] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
lowercase_ : Dict = 0
lowercase_ : Tuple = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
lowercase_ : List[Any] = sigma_hat if self.state_in_first_order else sigma_next
lowercase_ : int = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
lowercase_ : int = sigma_hat if self.state_in_first_order else sigma_next
lowercase_ : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
lowercase_ : List[Any] = model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.config.clip_sample:
lowercase_ : List[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
lowercase_ : Optional[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
lowercase_ : Tuple = sigma_next - sigma_hat
# store for 2nd order step
lowercase_ : Optional[Any] = derivative
lowercase_ : Dict = dt
lowercase_ : int = sample
else:
# 2. 2nd order / Heun's method
lowercase_ : Dict = (sample - pred_original_sample) / sigma_next
lowercase_ : Union[str, Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
lowercase_ : List[Any] = self.dt
lowercase_ : Union[str, Any] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
lowercase_ : int = None
lowercase_ : Optional[Any] = None
lowercase_ : Union[str, Any] = None
lowercase_ : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , lowercase_ : torch.FloatTensor , ):
lowercase_ : str = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCAmelCase ):
# mps does not support float64
lowercase_ : Optional[int] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
lowercase_ : Tuple = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
lowercase_ : List[str] = self.timesteps.to(original_samples.device )
lowercase_ : int = timesteps.to(original_samples.device )
lowercase_ : Tuple = [self.index_for_timestep(_lowerCAmelCase , _lowerCAmelCase ) for t in timesteps]
lowercase_ : Optional[int] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
lowercase_ : int = sigma.unsqueeze(-1 )
lowercase_ : str = original_samples + noise * sigma
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
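# Minimal usage sketch for a Heun-style scheduler such as the class above (the
# `HeunDiscreteScheduler` name and the `model` call are illustrative assumptions,
# not part of this file):
#   scheduler = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = model(model_input, t)
#       sample = scheduler.step(noise_pred, t, sample).prev_sample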
| 708
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def test_is_small_dataset(dataset_size, input_in_memory_max_size, monkeypatch):
    if input_in_memory_max_size != "default":
        monkeypatch.setattr(datasets.config, "IN_MEMORY_MAX_SIZE", input_in_memory_max_size)
    in_memory_max_size = datasets.config.IN_MEMORY_MAX_SIZE
    if input_in_memory_max_size == "default":
        assert in_memory_max_size == 0
    else:
        assert in_memory_max_size == input_in_memory_max_size
    if dataset_size and in_memory_max_size:
        expected = dataset_size < in_memory_max_size
    else:
        expected = False
    result = is_small_dataset(dataset_size)
    assert result == expected
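# For reference, the helper under test is expected to behave like this (illustrative):
#   is_small_dataset(400 * 2**20) is True only when 0 < dataset_size < IN_MEMORY_MAX_SIZE;
#   a dataset_size of None, or IN_MEMORY_MAX_SIZE == 0, always yields False.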
| 30
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Any = logging.get_logger(__name__)
_lowercase : Union[str, Any] = torch.device("cpu")
def prepare_img() -> Image.Image:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name: str) -> torch.Tensor:
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02] )
def rename_key(dct: dict, old: str, new: str) -> None:
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict) -> list:
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
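# Example of the renaming above on a hypothetical checkpoint key:
#   "network.0.0.dwconv.weight" -> "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight"
# (".dwconv" is rewritten first, then the "network.<stage>.<block>" prefix is expanded).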
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
lowercase_ : List[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
lowercase_ : Dict = 1000
lowercase_ : Optional[int] = """huggingface/label-files"""
lowercase_ : Optional[Any] = """imagenet-1k-id2label.json"""
lowercase_ : Optional[int] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) )
lowercase_ : Any = {int(snake_case_ ): v for k, v in idalabel.items()}
lowercase_ : Optional[Any] = idalabel
lowercase_ : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowercase_ : Dict = [3, 3, 6, 4]
lowercase_ : Optional[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
lowercase_ : List[str] = [3, 3, 9, 6]
lowercase_ : List[str] = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
lowercase_ : Optional[Any] = [4, 3, 10, 5]
lowercase_ : Dict = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
lowercase_ : List[str] = [4, 4, 12, 6]
lowercase_ : Any = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("""https""" ):
lowercase_ : Any = torch.hub.load_state_dict_from_url(snake_case_ , map_location="""cpu""" , check_hash=snake_case_ )
else:
lowercase_ : Union[str, Any] = torch.load(snake_case_ , map_location="""cpu""" )
lowercase_ : List[str] = checkpoint
lowercase_ : List[Any] = create_rename_keys(snake_case_ )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
# load HuggingFace model
lowercase_ : Union[str, Any] = SwiftFormerForImageClassification(snake_case_ ).eval()
hf_model.load_state_dict(snake_case_ )
# prepare test inputs
lowercase_ : Tuple = prepare_img()
lowercase_ : Dict = ViTImageProcessor.from_pretrained("""preprocessor_config""" )
lowercase_ : Optional[Any] = processor(images=snake_case_ , return_tensors="""pt""" )
# compare outputs from both models
lowercase_ : Optional[int] = get_expected_output(snake_case_ )
lowercase_ : Union[str, Any] = hf_model(inputs["""pixel_values"""] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , snake_case_ , atol=1e-3 )
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(snake_case_ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
_lowercase : str = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
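# Example invocation (the script filename and paths are illustrative assumptions):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth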
| 709
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
    def test_small_integration(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 30
| 0
|
'''simple docstring'''
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
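# Worked example: the prime sequence starts 2, 3, 5, 7, 11, 13, ..., so solution(6) == 13;
# the default solution() returns the 10001st prime, 104743 (Project Euler problem 7).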
if __name__ == "__main__":
print(f"""{solution() = }""")
| 710
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def heun_method(
    ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float
) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # predictor: forward Euler step
        y_pred = y[k] + step_size * ode_func(x, y[k])
        # corrector: average the slopes at both ends of the interval
        y[k + 1] = y[k] + (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_pred))
        x += step_size
    return y
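# Usage sketch with the corrected signature above: integrate y' = y on [0, 1]
# starting from y(0) = 1 with step 0.25.
#   y = heun_method(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)
#   y[-1]  # ≈ 2.69, approaching e ≈ 2.718 as the step size shrinks (2nd-order accurate)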
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/reformer-crime-and-punishment": (
            "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/reformer-crime-and-punishment": 524288,
}
class __magic_name__ ( lowerCAmelCase__):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = ["input_ids", "attention_mask"]
def __init__( self : int , lowercase_ : Any , lowercase_ : str="</s>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Optional[Any]=[] , lowercase_ : str = None , **lowercase_ : str , ):
lowercase_ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
lowercase_ : Tuple = vocab_file
lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_lowerCamelCase )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return self.sp_model.get_piece_size()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[Any] = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple ):
lowercase_ : Any = self.__dict__.copy()
lowercase_ : Any = None
return state
def __setstate__( self : str , lowercase_ : Optional[Any] ):
lowercase_ : Any = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
lowercase_ : Optional[Any] = {}
lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : Union[str, Any] ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] ):
return self.sp_model.piece_to_id(_lowerCamelCase )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[int] ):
if index < self.sp_model.get_piece_size():
lowercase_ : List[Any] = self.sp_model.IdToPiece(_lowerCamelCase )
return token
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[Any] ):
lowercase_ : Any = []
lowercase_ : Tuple = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_lowerCamelCase ) + token
lowercase_ : Optional[Any] = []
else:
current_sub_tokens.append(_lowerCamelCase )
out_string += self.sp_model.decode(_lowerCamelCase )
return out_string.strip()
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : List[str] = os.path.join(
_lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , """wb""" ) as fi:
lowercase_ : Dict = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
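# Usage sketch, assuming the class above is the tokenizer published as `ReformerTokenizer`
# (the names here are illustrative):
#   tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tokenizer("Crime and Punishment").input_ids
#   tokens = tokenizer.convert_ids_to_tokens(ids)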
| 711
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt
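# Worked example (rounded): a 1 kHz low-pass at a 48 kHz sample rate with the default
# Butterworth Q = 1/sqrt(2) yields biquad coefficients of approximately
#   a ≈ [1.0923, -1.9829, 0.9077] and b ≈ [0.00428, 0.00856, 0.00428]
# via filt = make_lowpass(1000, 48000).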
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
| 30
| 0
|
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
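# Example of the key remapping above (hypothetical checkpoint entry):
#   "image_encoder.blocks.0.norm1.weight" -> "vision_encoder.layers.0.layer_norm1.weight"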
def convert_sam_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub, model_hub_id="ybelkada/segment-anything"):
lowercase_ : int = hf_hub_download(__snake_case , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
lowercase_ : List[Any] = SamConfig()
elif "sam_vit_l" in model_name:
lowercase_ : Any = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
lowercase_ : Dict = SamConfig(
vision_config=__snake_case , )
elif "sam_vit_h" in model_name:
lowercase_ : Optional[Any] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
lowercase_ : Optional[Any] = SamConfig(
vision_config=__snake_case , )
lowercase_ : Optional[Any] = torch.load(__snake_case , map_location="""cpu""" )
lowercase_ : List[str] = replace_keys(__snake_case )
lowercase_ : List[str] = SamImageProcessor()
lowercase_ : Any = SamProcessor(image_processor=__snake_case )
lowercase_ : Dict = SamModel(__snake_case )
hf_model.load_state_dict(__snake_case )
lowercase_ : Optional[Any] = hf_model.to("""cuda""" )
lowercase_ : Dict = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
lowercase_ : List[Any] = Image.open(requests.get(__snake_case , stream=__snake_case ).raw ).convert("""RGB""" )
lowercase_ : Tuple = [[[400, 650]]]
lowercase_ : Tuple = [[1]]
lowercase_ : str = processor(images=np.array(__snake_case ) , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Tuple = hf_model(**__snake_case )
lowercase_ : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_8902_5115_9668
lowercase_ : Any = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : List[Any] = hf_model(**__snake_case )
lowercase_ : str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9712_6030_9219_3604
lowercase_ : Tuple = ((75, 275, 1725, 850),)
lowercase_ : str = processor(images=np.array(__snake_case ) , input_boxes=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Optional[int] = hf_model(**__snake_case )
lowercase_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8686_0156_0592_6514
# Test with 2 points and 1 image.
lowercase_ : Dict = [[[400, 650], [800, 650]]]
lowercase_ : Optional[Any] = [[1, 1]]
lowercase_ : str = processor(
images=np.array(__snake_case ) , input_points=__snake_case , input_labels=__snake_case , return_tensors="""pt""" ).to("""cuda""" )
with torch.no_grad():
lowercase_ : Any = hf_model(**__snake_case )
lowercase_ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
_lowercase : List[str] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
_lowercase : List[Any] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 712
|
'''simple docstring'''
import os
import posixpath
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuida().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
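# Usage sketch (illustrative): a builder like the one above backs `datasets.Dataset.from_spark`, e.g.
#   import datasets
#   ds = datasets.Dataset.from_spark(spark_df)  # spark_df is a pyspark.sql.DataFrame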
| 30
| 0
|
'''simple docstring'''
import datasets
from .evaluate import evaluate
_lowercase : Optional[Any] = "\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n"
_lowercase : Tuple = "\nThis metric wrap the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n"
_lowercase : List[str] = "\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n predictions: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair as given in the references (see below)\n - \'prediction_text\': list of possible texts for the answer, as a list of strings\n depending on a threshold on the confidence probability of each prediction.\n references: List of question-answers dictionaries with the following key-values:\n - \'id\': id of the question-answer pair (see above),\n - \'answers\': a Dict in the CUAD dataset format\n {\n \'text\': list of possible texts for the answer, as a list of strings\n \'answer_start\': list of start positions for the answer, as a list of ints\n }\n Note that answer_start values are not taken into account to compute the metric.\nReturns:\n \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n \'f1\': The F-score of predicted tokens versus the gold answer\n \'aupr\': Area Under the Precision-Recall curve\n \'prec_at_80_recall\': Precision at 80% recall\n \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n >>> cuad_metric = datasets.load_metric(\"cuad\")\n >>> results = cuad_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute(self, predictions, references):
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 713
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ ( unittest.TestCase):
def __init__( self : Dict , lowercase_ : List[Any] , lowercase_ : List[str]=3 , lowercase_ : Any=32 , lowercase_ : Dict=3 , lowercase_ : Optional[int]=10 , lowercase_ : Dict=[10, 20, 30, 40] , lowercase_ : List[str]=[1, 1, 2, 1] , lowercase_ : int=True , lowercase_ : str=True , lowercase_ : List[str]="relu" , lowercase_ : Optional[int]=3 , lowercase_ : List[Any]=None , ):
lowercase_ : List[Any] = parent
lowercase_ : List[Any] = batch_size
lowercase_ : List[Any] = image_size
lowercase_ : Any = num_channels
lowercase_ : List[Any] = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : str = depths
lowercase_ : Union[str, Any] = is_training
lowercase_ : List[Any] = use_labels
lowercase_ : str = hidden_act
lowercase_ : Tuple = num_labels
lowercase_ : Optional[int] = scope
lowercase_ : Tuple = len(__A )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : Any = self.get_config()
return config, pixel_values
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : int , lowercase_ : str ):
lowercase_ : Optional[int] = FlaxRegNetModel(config=__A )
lowercase_ : str = model(__A )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict , lowercase_ : Any ):
lowercase_ : Optional[Any] = self.num_labels
lowercase_ : List[str] = FlaxRegNetForImageClassification(config=__A )
lowercase_ : Dict = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.prepare_config_and_inputs()
lowercase_ , lowercase_ : Union[str, Any] = config_and_inputs
lowercase_ : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __magic_name__ ( UpperCamelCase__, unittest.TestCase):
UpperCamelCase__ = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Union[str, Any] = FlaxRegNetModelTester(self )
lowercase_ : List[str] = ConfigTester(self , config_class=__A , has_text_modality=__A )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@unittest.skip(reason="""RegNet does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : int ):
pass
@unittest.skip(reason="""RegNet does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[int] = model_class(__A )
lowercase_ : Optional[int] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Dict = [*signature.parameters.keys()]
lowercase_ : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __A )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any] ):
lowercase_ : Optional[int] = model_class(__A )
lowercase_ : Optional[int] = model(**self._prepare_for_class(__A , __A ) )
lowercase_ : List[str] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : List[str] = self.model_tester.num_stages
self.assertEqual(len(__A ) , expected_num_stages + 1 )
lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Dict = True
check_hidden_states_output(__A , __A , __A )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowercase_ : Any = self._prepare_for_class(__A , __A )
lowercase_ : Union[str, Any] = model_class(__A )
@jax.jit
def model_jitted(lowercase_ : Dict , **lowercase_ : Any ):
return model(pixel_values=__A , **__A )
with self.subTest("""JIT Enabled""" ):
lowercase_ : Optional[Any] = model_jitted(**__A ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
lowercase_ : Union[str, Any] = model_jitted(**__A ).to_tuple()
self.assertEqual(len(__A ) , len(__A ) )
for jitted_output, output in zip(__A , __A ):
self.assertEqual(jitted_output.shape , output.shape )
def lowerCamelCase ( ):
lowercase_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_flax
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return AutoImageProcessor.from_pretrained("""facebook/regnet-y-040""" ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[str] = FlaxRegNetForImageClassification.from_pretrained("""facebook/regnet-y-040""" )
lowercase_ : Union[str, Any] = self.default_image_processor
lowercase_ : str = prepare_img()
lowercase_ : int = image_processor(images=__A , return_tensors="""np""" )
lowercase_ : List[str] = model(**__A )
# verify the logits
lowercase_ : Optional[Any] = (1, 1000)
self.assertEqual(outputs.logits.shape , __A )
lowercase_ : List[str] = jnp.array([-0.41_80, -1.50_51, -3.48_36] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) )
| 714
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 30
| 0
|
'''simple docstring'''
def factorial(digit: int) -> int:
    return 1 if digit in (0, 1) else (digit * factorial(digit - 1))
def krishnamurthy(number: int) -> bool:
    fact_sum = 0
    duplicate = number
    while duplicate > 0:
        duplicate, digit = divmod(duplicate, 10)
        fact_sum += factorial(digit)
    return fact_sum == number
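# Example: 145 is a Krishnamurthy (strong) number because 1! + 4! + 5! = 1 + 24 + 120 = 145,
# whereas 123 is not (1! + 2! + 3! = 9).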
if __name__ == "__main__":
print("Program to check whether a number is a Krisnamurthy Number or not.")
    number = int(input("Enter number: ").strip())
print(
f"""{number} is {"" if krishnamurthy(number) else "not "}a Krishnamurthy Number."""
)
| 715
|
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : List[Any] = """ylacombe/bark-small"""
lowercase_ : List[str] = tempfile.mkdtemp()
lowercase_ : Tuple = """en_speaker_1"""
lowercase_ : Union[str, Any] = """This is a test string"""
lowercase_ : int = """speaker_embeddings_path.json"""
lowercase_ : Any = """speaker_embeddings"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **lowercase_ : Optional[int] ):
return AutoTokenizer.from_pretrained(self.checkpoint , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Any = self.get_tokenizer()
lowercase_ : Optional[Any] = BarkProcessor(tokenizer=lowercase_ )
processor.save_pretrained(self.tmpdirname )
lowercase_ : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase_ : List[str] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowercase_ : Optional[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase_ : Optional[int] = 35
lowercase_ : int = 2
lowercase_ : Union[str, Any] = 8
lowercase_ : Union[str, Any] = {
"""semantic_prompt""": np.ones(lowercase_ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase_ : str = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : Dict = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase_ : Any = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(lowercase_ , **lowercase_ )
lowercase_ : Optional[Any] = processor(text=self.input_string , voice_preset=lowercase_ )
lowercase_ : List[Any] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(lowercase_ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase_ : Union[str, Any] = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : List[str] = self.get_tokenizer()
lowercase_ : int = BarkProcessor(tokenizer=lowercase_ )
lowercase_ : Any = processor(text=self.input_string )
lowercase_ : List[str] = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=lowercase_ , return_attention_mask=lowercase_ , return_token_type_ids=lowercase_ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
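# Example: for n = 100 the loop divides out 2 twice and 5 twice, returning [2, 2, 5, 5];
# for a prime such as 97 the loop never divides, so the trailing append returns [97].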
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716
|
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=_UpperCAmelCase)
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = field(default='''image-classification''', metadata={'''include_in_asdict_even_if_is_default''': True})
UpperCamelCase__ = Features({'''image''': Image()})
UpperCamelCase__ = Features({'''labels''': ClassLabel})
UpperCamelCase__ = "image"
UpperCamelCase__ = "labels"
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , lowercase_ ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
lowercase_ : List[str] = copy.deepcopy(self )
lowercase_ : List[str] = self.label_schema.copy()
lowercase_ : List[Any] = features[self.label_column]
lowercase_ : Optional[Any] = label_schema
return task_template
@property
def SCREAMING_SNAKE_CASE_ ( self : int ):
return {
self.image_column: "image",
self.label_column: "labels",
}
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def lowerCamelCase(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
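# Illustrative example (hypothetical values): with conductivity = 0, electron_conc = 1e20 and
# mobility = 0.01, the function solves for the missing quantity and returns
# ("conductivity", 0.01 * 1e20 * 1.6021e-19), i.e. approximately ("conductivity", 0.16).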
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717
|
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[Any] ) -> List[Any]:
lowercase_ : str = 1.5
lowercase_ : List[Any] = int(factor * num_class_images )
lowercase_ : int = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 )
os.makedirs(F'''{class_data_dir}/images''' , exist_ok=UpperCAmelCase__ )
if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase_ : List[str] = client.query(text=UpperCAmelCase__ )
if len(UpperCAmelCase__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase_ : List[str] = int(factor * num_images )
lowercase_ : List[str] = ClipClient(
url="""https://knn.laion.ai/knn-service""" , indice_name="""laion_400m""" , num_images=UpperCAmelCase__ , aesthetic_weight=0.1 , )
lowercase_ : List[str] = 0
lowercase_ : Dict = 0
lowercase_ : Tuple = tqdm(desc="""downloading real regularization images""" , total=UpperCAmelCase__ )
with open(F'''{class_data_dir}/caption.txt''' , """w""" ) as fa, open(F'''{class_data_dir}/urls.txt''' , """w""" ) as fa, open(
F'''{class_data_dir}/images.txt''' , """w""" ) as fa:
while total < num_class_images:
lowercase_ : str = class_images[count]
count += 1
try:
lowercase_ : Union[str, Any] = requests.get(images["""url"""] )
if img.status_code == 200:
lowercase_ : List[str] = Image.open(BytesIO(img.content ) )
with open(F'''{class_data_dir}/images/{total}.jpg''' , """wb""" ) as f:
f.write(img.content )
fa.write(images["""caption"""] + """\n""" )
fa.write(images["""url"""] + """\n""" )
fa.write(F'''{class_data_dir}/images/{total}.jpg''' + """\n""" )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Any = argparse.ArgumentParser("""""" , add_help=UpperCAmelCase__ )
parser.add_argument("""--class_prompt""" , help="""text prompt to retrieve images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--class_data_dir""" , help="""path to save images""" , required=UpperCAmelCase__ , type=UpperCAmelCase__ )
parser.add_argument("""--num_class_images""" , help="""number of images to download""" , default=200 , type=UpperCAmelCase__ )
return parser.parse_args()
if __name__ == "__main__":
_lowercase : Dict = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 30
| 0
|
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowerCamelCase ( UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str]=None ) -> Tuple:
lowercase_ : Union[str, Any] = None
if token is not None:
lowercase_ : Optional[int] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : Optional[int] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
lowercase_ : Any = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
lowercase_ : int = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
lowercase_ : Tuple = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_lowerCamelCase ):
lowercase_ : List[str] = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCamelCase ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Tuple=None ) -> Tuple:
lowercase_ : str = None
if token is not None:
lowercase_ : int = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : int = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
lowercase_ : int = requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
lowercase_ : Union[str, Any] = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
lowercase_ : Optional[int] = math.ceil((result["""total_count"""] - 100) / 100 )
for i in range(_lowerCamelCase ):
lowercase_ : Any = requests.get(url + F'''&page={i + 2}''' , headers=_lowerCamelCase ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Tuple ) -> Optional[int]:
lowercase_ : Union[str, Any] = None
if token is not None:
lowercase_ : Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
lowercase_ : Union[str, Any] = requests.get(_lowerCamelCase , headers=_lowerCamelCase , allow_redirects=_lowerCamelCase )
lowercase_ : List[str] = result.headers["""Location"""]
lowercase_ : Optional[int] = requests.get(_lowerCamelCase , allow_redirects=_lowerCamelCase )
lowercase_ : List[Any] = os.path.join(_lowerCamelCase , F'''{artifact_name}.zip''' )
with open(_lowerCamelCase , """wb""" ) as fp:
fp.write(response.content )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : int=None ) -> int:
lowercase_ : List[Any] = []
lowercase_ : Optional[Any] = []
lowercase_ : Optional[int] = None
with zipfile.ZipFile(_lowerCamelCase ) as z:
for filename in z.namelist():
if not os.path.isdir(_lowerCamelCase ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_lowerCamelCase ) as f:
for line in f:
lowercase_ : Optional[Any] = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
lowercase_ : List[str] = line[: line.index(""": """ )]
lowercase_ : Any = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
lowercase_ : List[Any] = line[len("""FAILED """ ) :]
failed_tests.append(_lowerCamelCase )
elif filename == "job_name.txt":
lowercase_ : Union[str, Any] = line
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(_lowerCamelCase )} for `errors` '''
F'''and {len(_lowerCamelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""" )
lowercase_ : List[Any] = None
if job_name and job_links:
lowercase_ : int = job_links.get(_lowerCamelCase , _lowerCamelCase )
# A list with elements of the form (line of error, error, failed test)
lowercase_ : Optional[Any] = [x + [y] + [job_link] for x, y in zip(_lowerCamelCase , _lowerCamelCase )]
return result
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any]=None ) -> Tuple:
lowercase_ : int = []
lowercase_ : Dict = [os.path.join(_lowerCamelCase , _lowerCamelCase ) for p in os.listdir(_lowerCamelCase ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_lowerCamelCase , job_links=_lowerCamelCase ) )
return errors
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int=None ) -> Optional[Any]:
lowercase_ : Optional[int] = Counter()
counter.update([x[1] for x in logs] )
lowercase_ : Dict = counter.most_common()
lowercase_ : Any = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
lowercase_ : List[str] = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
lowercase_ : List[Any] = dict(sorted(r.items() , key=lambda UpperCAmelCase__ : item[1]["count"] , reverse=_lowerCamelCase ) )
return r
def lowerCamelCase ( UpperCAmelCase__ : Dict ) -> Any:
lowercase_ : List[Any] = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
lowercase_ : Optional[int] = test.split("""/""" )[2]
else:
lowercase_ : str = None
return test
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : int=None ) -> List[Any]:
lowercase_ : Union[str, Any] = [(x[0], x[1], get_model(x[2] )) for x in logs]
lowercase_ : List[Any] = [x for x in logs if x[2] is not None]
lowercase_ : Optional[Any] = {x[2] for x in logs}
lowercase_ : List[str] = {}
for test in tests:
lowercase_ : int = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
lowercase_ : str = counter.most_common()
lowercase_ : int = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
lowercase_ : Optional[int] = sum(error_counts.values() )
if n_errors > 0:
lowercase_ : Optional[Any] = {"""count""": n_errors, """errors""": error_counts}
lowercase_ : Tuple = dict(sorted(r.items() , key=lambda UpperCAmelCase__ : item[1]["count"] , reverse=_lowerCamelCase ) )
return r
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Union[str, Any]:
lowercase_ : Optional[int] = """| no. | error | status |"""
lowercase_ : Optional[Any] = """|-:|:-|:-|"""
lowercase_ : int = [header, sep]
for error in reduced_by_error:
lowercase_ : Optional[int] = reduced_by_error[error]["""count"""]
lowercase_ : Union[str, Any] = F'''| {count} | {error[:100]} | |'''
lines.append(_lowerCamelCase )
return "\n".join(_lowerCamelCase )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] ) -> str:
lowercase_ : Optional[Any] = """| model | no. of errors | major error | count |"""
lowercase_ : Optional[Any] = """|-:|-:|-:|-:|"""
lowercase_ : Optional[int] = [header, sep]
for model in reduced_by_model:
lowercase_ : List[str] = reduced_by_model[model]["""count"""]
lowercase_ : str = list(reduced_by_model[model]["""errors"""].items() )[0]
lowercase_ : Optional[Any] = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(_lowerCamelCase )
return "\n".join(_lowerCamelCase )
if __name__ == "__main__":
_lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_lowercase : Union[str, Any] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_lowercase : int = get_job_links(args.workflow_run_id, token=args.token)
_lowercase : List[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_lowercase : Union[str, Any] = k.find(" / ")
_lowercase : int = k[index + len(" / ") :]
_lowercase : Optional[int] = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_lowercase : Optional[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_lowercase : Union[str, Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_lowercase : List[str] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_lowercase : Dict = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_lowercase : Optional[int] = reduce_by_error(errors)
_lowercase : Union[str, Any] = reduce_by_model(errors)
_lowercase : str = make_github_table(reduced_by_error)
_lowercase : Union[str, Any] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 718
|
'''simple docstring'''
from __future__ import annotations
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0
    if end is None:
        end = len(sequence) - 1
    if start >= end:
        return
    mid = (start + end) // 2
    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)
    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]
    slowsort(sequence, start, end - 1)
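# Example: calling slowsort(seq) on seq = [3, 1, 2] rearranges it in place to [1, 2, 3].
# Slowsort is a deliberately inefficient "multiply and surrender" algorithm, so it is only
# practical for very small inputs.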
if __name__ == "__main__":
from doctest import testmod
testmod()
| 30
| 0
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : int=13 , lowercase_ : Union[str, Any]=64 , lowercase_ : Tuple=2 , lowercase_ : Dict=3 , lowercase_ : Union[str, Any]=True , lowercase_ : List[str]=True , lowercase_ : Dict=32 , lowercase_ : List[str]=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Tuple=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[Any]=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Optional[Any]=10 , lowercase_ : int=0.02 , lowercase_ : str=[1, 16, 4, 4] , lowercase_ : str=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : Tuple = image_size
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : List[str] = is_training
lowercase_ : List[Any] = use_labels
lowercase_ : Optional[Any] = hidden_size
lowercase_ : Any = num_hidden_layers
lowercase_ : List[Any] = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : List[str] = hidden_dropout_prob
lowercase_ : str = attention_probs_dropout_prob
lowercase_ : int = type_sequence_label_size
lowercase_ : Dict = initializer_range
lowercase_ : Tuple = scope
lowercase_ : Dict = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowercase_ : Optional[int] = (self.image_size // 32) ** 2
lowercase_ : List[str] = num_patches + 1
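        # e.g. with the defaults above (image_size=64, backbone output stride 32): the feature map
        # is 2 x 2, so num_patches = (64 // 32) ** 2 = 4 and seq_length = 4 + 1 = 5 ([CLS] + patches)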
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : str = None
if self.use_labels:
lowercase_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ : Dict = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Union[str, Any] = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A__ , )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : int ):
lowercase_ : Optional[int] = ViTHybridModel(config=A__ )
model.to(A__ )
model.eval()
lowercase_ : Tuple = model(A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : str , lowercase_ : Optional[int] ):
lowercase_ : Dict = self.type_sequence_label_size
lowercase_ : Dict = ViTHybridForImageClassification(A__ )
model.to(A__ )
model.eval()
lowercase_ : Any = model(A__ , labels=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ : Union[str, Any] = config_and_inputs
lowercase_ : str = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Union[str, Any] = ViTHybridModelTester(self )
lowercase_ : List[Any] = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : int = model_class(A__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(A__ )
lowercase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Optional[int] = [*signature.parameters.keys()]
lowercase_ : Union[str, Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A__ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A__ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Any = _config_zero_init(A__ )
for model_class in self.all_model_classes:
lowercase_ : List[str] = model_class(config=A__ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowercase_ : Tuple = [f'''{name}.{key}''' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Union[str, Any] = ViTHybridModel.from_pretrained(A__ )
self.assertIsNotNone(A__ )
def lowerCamelCase ( ) -> Any:
lowercase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : str = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A__ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Tuple = prepare_img()
lowercase_ : Optional[int] = image_processor(images=A__ , return_tensors="""pt""" ).to(A__ )
# forward pass
with torch.no_grad():
lowercase_ : List[Any] = model(**A__ )
# verify the logits
lowercase_ : Tuple = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A__ )
lowercase_ : List[Any] = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(A__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A__ , atol=1E-4 ) )
@slow
@require_accelerate
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : str = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowercase_ : Tuple = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowercase_ : Optional[Any] = prepare_img()
lowercase_ : Union[str, Any] = image_processor(images=A__ , return_tensors="""pt""" )
lowercase_ : Union[str, Any] = model(**A__ )
lowercase_ : Tuple = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowercase_ : Union[str, Any] = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , """tabby, tabby cat""" )
| 719
|
'''simple docstring'''
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_lowercase : Optional[Any] = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
_lowercase : Dict = parser.parse_args()
_lowercase : Dict = "cpu"
_lowercase : str = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
_lowercase : Any = "path-to-your-trained-model"
_lowercase : str = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_lowercase : Optional[int] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_lowercase : Any = pipe.to(device)
# to channels last
_lowercase : Union[str, Any] = pipe.unet.to(memory_format=torch.channels_last)
_lowercase : List[Any] = pipe.vae.to(memory_format=torch.channels_last)
_lowercase : Union[str, Any] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_lowercase : Tuple = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_lowercase : int = torch.randn(2, 4, 64, 64)
_lowercase : int = torch.rand(1) * 999
_lowercase : Union[str, Any] = torch.randn(2, 77, 768)
_lowercase : Optional[Any] = (sample, timestep, encoder_hidden_status)
try:
_lowercase : int = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_lowercase : Optional[int] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : List[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_lowercase : str = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_lowercase : int = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_lowercase : int = 666
_lowercase : Any = torch.Generator(device).manual_seed(seed)
_lowercase : int = {"generator": generator}
if args.steps is not None:
_lowercase : Optional[int] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_lowercase : List[Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 30
| 0
|
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
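# Example: is_isogram("Uncopyrightable") returns True (no repeated letters), while
# is_isogram("apple") returns False because "p" appears twice.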
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()
    isogram = is_isogram(input_str)
print(f"""{input_str} is {"an" if isogram else "not an"} isogram.""")
| 720
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase : Optional[Any] = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Any = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 30
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
_lowercase : Tuple = None
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Optional[Any] = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
_lowercase : Optional[int] = {
"""vocab_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""xlnet-base-cased""": """https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json""",
"""xlnet-large-cased""": """https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json""",
},
}
_lowercase : Any = {
"""xlnet-base-cased""": None,
"""xlnet-large-cased""": None,
}
_lowercase : Optional[int] = """▁"""
# Segments (not really needed)
_lowercase : List[str] = 0
_lowercase : List[Any] = 1
_lowercase : Dict = 2
_lowercase : Union[str, Any] = 3
_lowercase : Dict = 4
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = "left"
UpperCamelCase__ = XLNetTokenizer
def __init__( self : Optional[Any] , lowercase_ : int=None , lowercase_ : int=None , lowercase_ : List[str]=False , lowercase_ : str=True , lowercase_ : int=False , lowercase_ : Tuple="<s>" , lowercase_ : Dict="</s>" , lowercase_ : List[Any]="<unk>" , lowercase_ : Optional[int]="<sep>" , lowercase_ : Any="<pad>" , lowercase_ : Optional[Any]="<cls>" , lowercase_ : Dict="<mask>" , lowercase_ : List[Any]=["<eop>", "<eod>"] , **lowercase_ : str , ):
lowercase_ : List[str] = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
super().__init__(
vocab_file=UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , remove_space=UpperCamelCase__ , keep_accents=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
lowercase_ : Tuple = 3
lowercase_ : List[str] = do_lower_case
lowercase_ : Optional[int] = remove_space
lowercase_ : str = keep_accents
lowercase_ : Any = vocab_file
lowercase_ : List[Any] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : str , lowercase_ : List[str] = None ):
lowercase_ : str = [self.sep_token_id]
lowercase_ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : List[Any] = None ):
lowercase_ : Tuple = [self.sep_token_id]
lowercase_ : Optional[int] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[Any] , lowercase_ : List[Any] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ : Optional[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
| 721
|
'''simple docstring'''
import unittest
import numpy as np
def schur_complement(
    mat_a: np.ndarray,
    mat_b: np.ndarray,
    mat_c: np.ndarray,
    pseudo_inv: np.ndarray | None = None,
) -> np.ndarray:
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)
    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)
    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)
    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")
    return mat_c - mat_b.T @ a_inv @ mat_b
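# The value returned above is the Schur complement S = C - B^T A^(-1) B of the block matrix
# [[A, B], [B^T, C]]; the determinant identity det([[A, B], [B^T, C]]) = det(A) * det(S)
# is what the first test below is intended to verify numerically.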
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
lowercase_ : Tuple = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : int = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Dict = np.array([[2, 1], [6, 3]] )
lowercase_ : Union[str, Any] = schur_complement(lowercase_ , lowercase_ , lowercase_ )
lowercase_ : List[Any] = np.block([[a, b], [b.T, c]] )
lowercase_ : Optional[int] = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
lowercase_ : int = np.linalg.det(lowercase_ )
self.assertAlmostEqual(lowercase_ , det_a * det_s )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : Optional[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : Union[str, Any] = np.array([[2, 1], [6, 3]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : List[Any] = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
lowercase_ : List[Any] = np.array([[0, 3], [3, 0], [2, 3]] )
lowercase_ : str = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(lowercase_ ):
schur_complement(lowercase_ , lowercase_ , lowercase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]
def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)
def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
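# Example: bitonic_sort(seq, 0, len(seq), 1) sorts seq ascending (direction 0 sorts it
# descending), e.g. [6, 5, 3, 1] becomes [1, 3, 5, 6]. This network-style sort assumes the
# input length is a power of two.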
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("\nSorted array in ascending order is: ", end="")
print(*unsorted, sep=", ")
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("Sorted array in descending order is: ", end="")
print(*unsorted, sep=", ")
| 700
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def lowerCamelCase ( UpperCAmelCase__ : bytes ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : Union[str, Any] = F'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(UpperCAmelCase__ )
lowercase_ : Dict = """""".join(bin(UpperCAmelCase__ )[2:].zfill(8 ) for byte in data )
lowercase_ : Union[str, Any] = len(UpperCAmelCase__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase_ : List[Any] = b"""=""" * ((6 - len(UpperCAmelCase__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(UpperCAmelCase__ ) % 6)
else:
lowercase_ : Union[str, Any] = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(UpperCAmelCase__ ) , 6 ) ).encode()
+ padding
)
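# For reference, standard Base64 maps every 3 input bytes onto 4 characters of the charset
# above; b"A" (0b01000001) is padded out to 12 bits and encodes to b"QQ==".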
def lowerCamelCase ( UpperCAmelCase__ : str ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) and not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowercase_ : List[str] = (
"""argument should be a bytes-like object or ASCII string, """
F'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(UpperCAmelCase__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
try:
lowercase_ : Optional[int] = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
lowercase_ : Any = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(UpperCAmelCase__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase_ : Optional[int] = encoded_data[:-padding]
lowercase_ : Any = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase_ : int = """""".join(
bin(B64_CHARSET.index(UpperCAmelCase__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase_ : Optional[int] = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(UpperCAmelCase__ ) , 8 )
]
return bytes(UpperCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])
    return matrix[-1][-1]
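# Example: for matrix = [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the minimum top-left to bottom-right
# path cost (moving only right or down) is 7, following 1 -> 3 -> 1 -> 1 -> 1. Note that the
# matrix is updated in place while the costs are accumulated.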
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701
|
'''simple docstring'''
import argparse
_lowercase : Optional[int] = "docs/source/_static/js/custom.js"
def lowerCamelCase ( UpperCAmelCase__ : Tuple ) -> Dict:
with open(UpperCAmelCase__ , encoding="""utf-8""" , newline="""\n""" ) as f:
lowercase_ : Optional[int] = f.readlines()
lowercase_ : Tuple = 0
# First let's put the right version
while not lines[index].startswith("""const stableVersion =""" ):
index += 1
lowercase_ : Optional[Any] = F'''const stableVersion = "v{version}"\n'''
# Then update the dictionary
while not lines[index].startswith("""const versionMapping = {""" ):
index += 1
# We go until the end
while not lines[index].startswith("""}""" ):
index += 1
# We add the new version at the end
lines[index - 1] += F''' "v{version}": "v{version}",\n'''
with open(UpperCAmelCase__ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument("--version", help="Release version.")
_lowercase : Dict = parser.parse_args()
update_custom_js(args.version)
| 30
| 0
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
_lowercase : Tuple = None
_lowercase : Union[str, Any] = logging.get_logger(__name__)
_lowercase : Dict = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
_lowercase : Tuple = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
_lowercase : Tuple = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
_lowercase : List[Any] = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __magic_name__ ( _UpperCAmelCase):
'''simple docstring'''
UpperCamelCase__ = VOCAB_FILES_NAMES
UpperCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ = ['''input_ids''', '''attention_mask''']
UpperCamelCase__ = MBartTokenizer
UpperCamelCase__ = []
UpperCamelCase__ = []
def __init__( self : int , lowercase_ : Tuple=None , lowercase_ : Optional[int]=None , lowercase_ : int="<s>" , lowercase_ : Optional[int]="</s>" , lowercase_ : int="</s>" , lowercase_ : Tuple="<s>" , lowercase_ : Dict="<unk>" , lowercase_ : int="<pad>" , lowercase_ : List[Any]="<mask>" , lowercase_ : Optional[int]=None , lowercase_ : Optional[int]=None , lowercase_ : List[Any]=None , **lowercase_ : Union[str, Any] , ):
lowercase_ : int = AddedToken(lowercase_ , lstrip=lowercase_ , rstrip=lowercase_ ) if isinstance(lowercase_ , lowercase_ ) else mask_token
super().__init__(
vocab_file=lowercase_ , tokenizer_file=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , cls_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , mask_token=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , additional_special_tokens=lowercase_ , **lowercase_ , )
lowercase_ : Tuple = vocab_file
lowercase_ : List[Any] = False if not self.vocab_file else True
lowercase_ : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
lowercase_ : List[str] = {
lang_code: self.convert_tokens_to_ids(lowercase_ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowercase_ : Optional[int] = src_lang if src_lang is not None else 'en_XX'
lowercase_ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
lowercase_ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : str ):
lowercase_ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ):
lowercase_ : Optional[int] = [self.sep_token_id]
lowercase_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : List[Any] ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
lowercase_ : Union[str, Any] = src_lang
lowercase_ : Optional[Any] = self(lowercase_ , add_special_tokens=lowercase_ , return_tensors=lowercase_ , **lowercase_ )
lowercase_ : List[str] = self.convert_tokens_to_ids(lowercase_ )
lowercase_ : List[Any] = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : List[str] , lowercase_ : str = "en_XX" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro_RO" , **lowercase_ : Tuple , ):
lowercase_ : str = src_lang
lowercase_ : Tuple = tgt_lang
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE_ ( self : int ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[Any] ):
lowercase_ : int = self.convert_tokens_to_ids(lowercase_ )
lowercase_ : Optional[Any] = []
lowercase_ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : str ):
lowercase_ : List[str] = self.convert_tokens_to_ids(lowercase_ )
lowercase_ : str = []
lowercase_ : Any = [self.eos_token_id, self.cur_lang_code]
lowercase_ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowercase_ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
lowercase_ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(lowercase_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowercase_ : List[str] = os.path.join(
lowercase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase_ ):
copyfile(self.vocab_file , lowercase_ )
return (out_vocab_file,)
| 702
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __magic_name__ :
def __init__( self : Tuple , lowercase_ : Tuple , lowercase_ : Any=3 , lowercase_ : int=32 , lowercase_ : str=3 , lowercase_ : int=10 , lowercase_ : Any=[8, 16, 32, 64] , lowercase_ : Tuple=[1, 1, 2, 1] , lowercase_ : Any=True , lowercase_ : int=True , lowercase_ : Any="relu" , lowercase_ : List[Any]=3 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=["stage2", "stage3", "stage4"] , lowercase_ : Optional[int]=[2, 3, 4] , lowercase_ : List[str]=1 , ):
lowercase_ : Any = parent
lowercase_ : str = batch_size
lowercase_ : Any = image_size
lowercase_ : Optional[Any] = num_channels
lowercase_ : Any = embeddings_size
lowercase_ : Union[str, Any] = hidden_sizes
lowercase_ : Any = depths
lowercase_ : Dict = is_training
lowercase_ : Tuple = use_labels
lowercase_ : str = hidden_act
lowercase_ : Optional[Any] = num_labels
lowercase_ : Tuple = scope
lowercase_ : Any = len(lowercase_ )
lowercase_ : Optional[Any] = out_features
lowercase_ : Tuple = out_indices
lowercase_ : str = num_groups
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ : List[Any] = None
if self.use_labels:
lowercase_ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : int = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = BitModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[Any] ):
lowercase_ : Union[str, Any] = self.num_labels
lowercase_ : Tuple = BitForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Any = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] ):
lowercase_ : Any = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Dict = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
lowercase_ : List[str] = None
lowercase_ : Dict = BitBackbone(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : Tuple = model(lowercase_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Optional[int] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : Optional[Any] = config_and_inputs
lowercase_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : int = BitModelTester(self )
lowercase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return
@unittest.skip(reason="""Bit does not output attentions""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Optional[Any] = model_class(lowercase_ )
lowercase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
lowercase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[Any] = model_class(config=lowercase_ )
for name, module in model.named_modules():
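# Normalization layers are expected to start as the identity transform: weight of 1 and bias of 0.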
if isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : Optional[Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : str = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
lowercase_ : Optional[int] = self.model_tester.num_stages
self.assertEqual(len(lowercase_ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : Dict = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
lowercase_ : Union[str, Any] = layer_type
lowercase_ : Optional[Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Union[str, Any] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : List[str] = BitModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : List[str] = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowercase_ )
lowercase_ : int = self.default_image_processor
lowercase_ : List[Any] = prepare_img()
lowercase_ : Dict = image_processor(images=lowercase_ , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : str = model(**lowercase_ )
# verify the logits
lowercase_ : Optional[int] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : Union[str, Any] = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@require_torch
class __magic_name__ ( _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (BitBackbone,) if is_torch_available() else ()
UpperCamelCase__ = BitConfig
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Union[str, Any] = BitModelTester(self )
| 30
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class __magic_name__ ( __A, __A, __A, unittest.TestCase):
UpperCamelCase__ = StableUnCLIPImgaImgPipeline
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
UpperCamelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase__ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase__ = frozenset([])
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Dict = 32
lowercase_ : Tuple = embedder_hidden_size
# image encoding components
lowercase_ : Optional[Any] = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowercase_ , projection_dim=lowercase_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
lowercase_ : int = StableUnCLIPImageNormalizer(embedding_dim=lowercase_ )
lowercase_ : Optional[Any] = DDPMScheduler(beta_schedule="""squaredcos_cap_v2""" )
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
torch.manual_seed(0 )
lowercase_ : Tuple = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowercase_ , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowercase_ : Optional[int] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock2D""", """DownBlock2D""") , up_block_types=("""UpBlock2D""", """CrossAttnUpBlock2D""") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="""projection""" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowercase_ , layers_per_block=1 , upcast_attention=lowercase_ , use_linear_projection=lowercase_ , )
torch.manual_seed(0 )
lowercase_ : Dict = DDIMScheduler(
beta_schedule="""scaled_linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , prediction_type="""v_prediction""" , set_alpha_to_one=lowercase_ , steps_offset=1 , )
torch.manual_seed(0 )
lowercase_ : Dict = AutoencoderKL()
lowercase_ : Dict = {
# image encoding components
'feature_extractor': feature_extractor,
'image_encoder': image_encoder.eval(),
# image noising components
'image_normalizer': image_normalizer.eval(),
'image_noising_scheduler': image_noising_scheduler,
# regular denoising components
'tokenizer': tokenizer,
'text_encoder': text_encoder.eval(),
'unet': unet.eval(),
'scheduler': scheduler,
'vae': vae.eval(),
}
return components
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Tuple , lowercase_ : List[Any]=0 , lowercase_ : Union[str, Any]=True ):
if str(lowercase_ ).startswith("""mps""" ):
lowercase_ : Union[str, Any] = torch.manual_seed(lowercase_ )
else:
lowercase_ : Union[str, Any] = torch.Generator(device=lowercase_ ).manual_seed(lowercase_ )
lowercase_ : List[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase_ ) ).to(lowercase_ )
if pil_image:
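# Convert the float tensor to a PIL image: rescale, clamp to [0, 1], move channels last, then use the pipeline helper.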
lowercase_ : Any = input_image * 0.5 + 0.5
lowercase_ : str = input_image.clamp(0 , 1 )
lowercase_ : Any = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ : int = DiffusionPipeline.numpy_to_pil(lowercase_ )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ : Tuple = self.get_dummy_components()
lowercase_ : Optional[int] = StableUnCLIPImgaImgPipeline(**lowercase_ )
lowercase_ : int = sd_pipe.to(lowercase_ )
sd_pipe.set_progress_bar_config(disable=lowercase_ )
lowercase_ : Optional[Any] = self.get_dummy_inputs(lowercase_ )
inputs.update({"""image_embeds""": None} )
lowercase_ : Optional[int] = sd_pipe(**lowercase_ ).images
lowercase_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ : Dict = np.array([0.38_72, 0.72_24, 0.56_01, 0.47_41, 0.68_72, 0.58_14, 0.46_36, 0.38_67, 0.50_78] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE_ ( self : int ):
lowercase_ : Tuple = torch_device in ['cpu', 'mps']
self._test_attention_slicing_forward_pass(test_max_difference=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
lowercase_ : int = torch_device in ['cpu', 'mps']
self._test_inference_batch_single_identical(test_max_difference=lowercase_ )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowercase_ )
@slow
@require_torch_gpu
class __magic_name__ ( unittest.TestCase):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
lowercase_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy""" )
lowercase_ : Union[str, Any] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-l-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ : Optional[Any] = pipe(lowercase_ , """anime turle""" , generator=lowercase_ , output_type="""np""" )
lowercase_ : Any = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ : Union[str, Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
lowercase_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy""" )
lowercase_ : Optional[int] = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
lowercase_ : str = pipe(lowercase_ , """anime turle""" , generator=lowercase_ , output_type="""np""" )
lowercase_ : int = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowercase_ , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png""" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ : int = StableUnCLIPImgaImgPipeline.from_pretrained(
"""fusing/stable-unclip-2-1-h-img2img""" , torch_dtype=torch.floataa )
lowercase_ : int = pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowercase_ : Optional[Any] = pipe(
lowercase_ , """anime turtle""" , num_inference_steps=2 , output_type="""np""" , )
lowercase_ : int = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 703
|
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
_lowercase : Optional[Any] = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
_lowercase : List[Any] = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def lowerCamelCase ( ) -> List[str]:
lowercase_ : str = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2""", """rougeL"""] )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : int = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , bootstrap_aggregation=UpperCAmelCase__ , rouge_keys=["""rouge2"""] )
assert (
pd.DataFrame(no_aggregation["""rouge2"""] ).fmeasure.mean()
== pd.DataFrame(no_aggregation_just_ra["""rouge2"""] ).fmeasure.mean()
)
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Tuple = """rougeLsum"""
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
lowercase_ : Optional[Any] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=[k] )[k]
assert score > score_no_sep
def lowerCamelCase ( ) -> List[Any]:
lowercase_ : Optional[int] = ["""rouge1""", """rouge2""", """rougeL"""]
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
lowercase_ : Tuple = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ , rouge_keys=UpperCAmelCase__ )
assert score_sep == score_no_sep
def lowerCamelCase ( ) -> Optional[Any]:
lowercase_ : Union[str, Any] = [
"""Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports .""",
]
lowercase_ : List[str] = [
"""Margot Frank, died in 1945, a month earlier than previously thought.""",
"""Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of"""
""" the final seconds on board Flight 9525.""",
]
assert calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ ) == calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , newline_sep=UpperCAmelCase__ )
def lowerCamelCase ( ) -> Union[str, Any]:
lowercase_ : Optional[Any] = [
"""\" \"a person who has such a video needs to immediately give it to the investigators,\" prosecutor says .<n> \"it is a very disturbing scene,\" editor-in-chief of bild online tells \"erin burnett: outfront\" """
]
lowercase_ : List[Any] = [
""" Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports . Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says ."""
]
lowercase_ : Optional[int] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] , newline_sep=UpperCAmelCase__ )["""rougeLsum"""]
lowercase_ : List[str] = calculate_rouge(UpperCAmelCase__ , UpperCAmelCase__ , rouge_keys=["""rougeLsum"""] )["""rougeLsum"""]
assert new_score > prev_score
def lowerCamelCase ( ) -> Tuple:
lowercase_ : Optional[int] = Path("""examples/seq2seq/test_data/wmt_en_ro""" )
lowercase_ : List[Any] = calculate_rouge_path(data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
lowercase_ : Union[str, Any] = calculate_rouge_path(
data_dir.joinpath("""test.source""" ) , data_dir.joinpath("""test.target""" ) , bootstrap_aggregation=UpperCAmelCase__ )
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
| 30
| 0
|
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
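# A minimal sketch of the suggested replacement (the checkpoint id below is illustrative, not taken from this script):
# from diffusers import StableDiffusionInpaintPipeline
# pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting")
# result = pipe(prompt="...", image=init_image, mask_image=mask_image).images[0]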
| 704
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : Optional[Any] = logging.get_logger(__name__)
_lowercase : Union[str, Any] = {
"facebook/s2t-small-librispeech-asr": (
"https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json"
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = '''speech_to_text'''
UpperCamelCase__ = ['''past_key_values''']
UpperCamelCase__ = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : str , lowercase_ : Optional[int]=10000 , lowercase_ : int=12 , lowercase_ : Any=2048 , lowercase_ : Any=4 , lowercase_ : Dict=6 , lowercase_ : Any=2048 , lowercase_ : List[str]=4 , lowercase_ : str=0.0 , lowercase_ : str=0.0 , lowercase_ : Union[str, Any]=True , lowercase_ : List[Any]=True , lowercase_ : int="relu" , lowercase_ : str=256 , lowercase_ : int=0.1 , lowercase_ : int=0.0 , lowercase_ : str=0.0 , lowercase_ : Optional[int]=0.02 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=1 , lowercase_ : Dict=0 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=6000 , lowercase_ : Tuple=1024 , lowercase_ : str=2 , lowercase_ : Any=(5, 5) , lowercase_ : Union[str, Any]=1024 , lowercase_ : Dict=80 , lowercase_ : List[Any]=1 , **lowercase_ : int , ):
lowercase_ : List[Any] = vocab_size
lowercase_ : str = d_model
lowercase_ : List[Any] = encoder_ffn_dim
lowercase_ : str = encoder_layers
lowercase_ : Dict = encoder_attention_heads
lowercase_ : str = decoder_ffn_dim
lowercase_ : int = decoder_layers
lowercase_ : Any = decoder_attention_heads
lowercase_ : Any = dropout
lowercase_ : Dict = attention_dropout
lowercase_ : Optional[int] = activation_dropout
lowercase_ : Any = activation_function
lowercase_ : Union[str, Any] = init_std
lowercase_ : str = encoder_layerdrop
lowercase_ : Optional[int] = decoder_layerdrop
lowercase_ : Dict = use_cache
lowercase_ : Union[str, Any] = encoder_layers
lowercase_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowercase_ : Dict = max_source_positions
lowercase_ : Optional[int] = max_target_positions
lowercase_ : Tuple = num_conv_layers
lowercase_ : Tuple = list(lowercase_ )
lowercase_ : Union[str, Any] = conv_channels
lowercase_ : str = input_feat_per_channel
lowercase_ : str = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , is_encoder_decoder=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 30
| 0
|
'''simple docstring'''
import math
import os
import sys
def lowerCamelCase ( UpperCAmelCase__ : int ) -> Tuple:
lowercase_ : Dict = """"""
try:
with open(lowerCAmelCase__ , """rb""" ) as binary_file:
lowercase_ : List[str] = binary_file.read()
for dat in data:
lowercase_ : int = F'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Any ) -> Tuple:
lexicon.pop(lowerCAmelCase__ )
lowercase_ : List[Any] = last_match_id
if math.loga(lowerCAmelCase__ ).is_integer():
for curr_key in lexicon:
lowercase_ : Union[str, Any] = """0""" + lexicon[curr_key]
lowercase_ : List[str] = bin(lowerCAmelCase__ )[2:]
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> str:
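# Lempel-Ziv-style decompression: grow the current bit string until it matches a lexicon key, emit the match, then extend the lexicon with a new entry.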
lowercase_ : List[Any] = {"""0""": """0""", """1""": """1"""}
lowercase_ , lowercase_ : List[str] = """""", """"""
lowercase_ : int = len(lowerCAmelCase__ )
for i in range(len(lowerCAmelCase__ ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
lowercase_ : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
index += 1
lowercase_ : List[str] = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
lowercase_ : int = lexicon[curr_string]
result += last_match_id
return result
def lowerCamelCase ( UpperCAmelCase__ : Tuple , UpperCAmelCase__ : Optional[Any] ) -> Any:
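# Prepend a header holding the original file length: the length in binary preceded by (length - 1) zero bits, effectively an Elias-gamma code.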
lowercase_ : Union[str, Any] = os.path.getsize(lowerCAmelCase__ )
lowercase_ : Optional[int] = bin(lowerCAmelCase__ )[2:]
lowercase_ : List[str] = len(lowerCAmelCase__ )
return "0" * (length_length - 1) + file_length_binary + compressed
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : int ) -> Any:
lowercase_ : Union[str, Any] = 8
try:
with open(lowerCAmelCase__ , """wb""" ) as opened_file:
lowercase_ : Optional[Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ )
]
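# Pad the final byte with a single '1' bit followed by '0' bits so the padding can be detected and stripped on decompression.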
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(lowerCAmelCase__ , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> List[str]:
lowercase_ : Any = read_file_binary(lowerCAmelCase__ )
lowercase_ : Optional[Any] = compress_data(lowerCAmelCase__ )
lowercase_ : Union[str, Any] = add_file_length(lowerCAmelCase__ , lowerCAmelCase__ )
write_file_binary(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 705
|
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __magic_name__ :
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : List[Any]=13 , lowercase_ : List[str]=10 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=2 , lowercase_ : Optional[Any]=2 , lowercase_ : int=True , lowercase_ : List[Any]=True , lowercase_ : Union[str, Any]=32 , lowercase_ : Union[str, Any]=5 , lowercase_ : str=4 , lowercase_ : Dict=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : Any=10 , lowercase_ : Tuple=0.02 , lowercase_ : Any="divided_space_time" , lowercase_ : Tuple=None , ):
lowercase_ : int = parent
lowercase_ : str = batch_size
lowercase_ : List[str] = image_size
lowercase_ : str = num_channels
lowercase_ : List[Any] = patch_size
lowercase_ : Optional[Any] = num_frames
lowercase_ : Dict = is_training
lowercase_ : int = use_labels
lowercase_ : List[str] = hidden_size
lowercase_ : Dict = num_hidden_layers
lowercase_ : Dict = num_attention_heads
lowercase_ : Any = intermediate_size
lowercase_ : Optional[int] = hidden_act
lowercase_ : Optional[Any] = hidden_dropout_prob
lowercase_ : List[Any] = attention_probs_dropout_prob
lowercase_ : Any = attention_type
lowercase_ : Union[str, Any] = initializer_range
lowercase_ : List[str] = scope
lowercase_ : Optional[int] = num_labels
# in TimeSformer, the total sequence length equals num_frames * num_patches per frame + 1 CLS token
lowercase_ : Dict = (image_size // patch_size) ** 2
lowercase_ : List[Any] = (num_frames) * self.num_patches_per_frame + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
lowercase_ : Optional[Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowercase_ : int = None
if self.use_labels:
lowercase_ : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
lowercase_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
lowercase_ : Any = self.num_labels
return config
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ):
lowercase_ : Optional[Any] = TimesformerModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : List[str] , lowercase_ : str ):
lowercase_ : Dict = TimesformerForVideoClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
lowercase_ : int = model(lowercase_ )
# verify the logits shape
lowercase_ : List[Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ : List[str] = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs
lowercase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase):
UpperCamelCase__ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCamelCase__ = (
{'''feature-extraction''': TimesformerModel, '''video-classification''': TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerModelTester(self )
lowercase_ : Union[str, Any] = ConfigTester(
self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : Tuple=False ):
lowercase_ : List[Any] = copy.deepcopy(lowercase_ )
if return_labels:
if model_class in get_values(lowercase_ ):
lowercase_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""TimeSformer does not use inputs_embeds""" )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
pass
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : str = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
lowercase_ , lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : Dict = model_class(lowercase_ )
lowercase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ : Union[str, Any] = [*signature.parameters.keys()]
lowercase_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*lowercase_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ : Any = TimesformerModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : str ):
if not self.has_attentions:
pass
else:
lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ : List[str] = True
for model_class in self.all_model_classes:
lowercase_ : str = self.model_tester.seq_length
lowercase_ : int = self.model_tester.num_frames
lowercase_ : int = True
lowercase_ : Any = False
lowercase_ : str = True
lowercase_ : int = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : List[str] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase_ : List[str] = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : int = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions have shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
lowercase_ : Optional[Any] = len(lowercase_ )
# Check attention is always last and order is fine
lowercase_ : Tuple = True
lowercase_ : Dict = True
lowercase_ : str = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
self.assertEqual(out_len + 1 , len(lowercase_ ) )
lowercase_ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers )
# attentions have shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
def check_hidden_states_output(lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Dict ):
lowercase_ : List[str] = model_class(lowercase_ )
model.to(lowercase_ )
model.eval()
with torch.no_grad():
lowercase_ : Optional[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) )
lowercase_ : Dict = outputs.hidden_states
lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowercase_ ) , lowercase_ )
lowercase_ : List[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase_ : List[str] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ : Optional[int] = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ )
def lowerCamelCase ( ) -> Optional[int]:
lowercase_ : List[str] = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
lowercase_ : List[Any] = np.load(UpperCAmelCase__ )
return list(UpperCAmelCase__ )
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
lowercase_ : Any = TimesformerForVideoClassification.from_pretrained("""facebook/timesformer-base-finetuned-k400""" ).to(
lowercase_ )
lowercase_ : Optional[Any] = self.default_image_processor
lowercase_ : Any = prepare_video()
lowercase_ : Optional[int] = image_processor(video[:8] , return_tensors="""pt""" ).to(lowercase_ )
# forward pass
with torch.no_grad():
lowercase_ : Optional[Any] = model(**lowercase_ )
# verify the logits
lowercase_ : Any = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , lowercase_ )
lowercase_ : int = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( UpperCAmelCase__ : List[str] ):
if not numbers:
return 0
if not isinstance(lowerCAmelCase_ , (list, tuple) ) or not all(
isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for number in numbers ):
raise ValueError("""numbers must be an iterable of integers""" )
lowercase_ : List[str] = numbers[0]
for i in range(1 , len(lowerCAmelCase_ ) ):
# update the maximum and minimum subarray products
lowercase_ : List[Any] = numbers[i]
if number < 0:
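# a negative factor flips signs, so swap the running maximum and minimum products before updating them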
lowercase_ : Optional[Any] = min_till_now, max_till_now
lowercase_ : Dict = max(lowerCAmelCase_ , max_till_now * number )
lowercase_ : Any = min(lowerCAmelCase_ , min_till_now * number )
# update the maximum product found till now
lowercase_ : Optional[int] = max(lowerCAmelCase_ , lowerCAmelCase_ )
return max_prod
| 706
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
_lowercase : Tuple = logging.get_logger(__name__)
# General docstring
_lowercase : List[str] = "RegNetConfig"
# Base docstring
_lowercase : Dict = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = [1, 1088, 7, 7]
# Image classification docstring
_lowercase : Optional[Any] = "facebook/regnet-y-040"
_lowercase : Union[str, Any] = "tabby, tabby cat"
_lowercase : str = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __magic_name__ ( nn.Module):
def __init__( self : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 3 , lowercase_ : int = 1 , lowercase_ : int = 1 , lowercase_ : Optional[str] = "relu" , ):
super().__init__()
lowercase_ : List[Any] = nn.Convad(
lowercase_ , lowercase_ , kernel_size=lowercase_ , stride=lowercase_ , padding=kernel_size // 2 , groups=lowercase_ , bias=lowercase_ , )
lowercase_ : str = nn.BatchNormad(lowercase_ )
lowercase_ : Optional[int] = ACTaFN[activation] if activation is not None else nn.Identity()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : List[str] ):
lowercase_ : Dict = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : List[Any] , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : str = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
lowercase_ : Any = config.num_channels
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Optional[Any] ):
lowercase_ : List[str] = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
"""Make sure that the channel dimension of the pixel values match with the one set in the configuration.""" )
lowercase_ : Any = self.embedder(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 ):
super().__init__()
lowercase_ : Optional[Any] = nn.Convad(lowercase_ , lowercase_ , kernel_size=1 , stride=lowercase_ , bias=lowercase_ )
lowercase_ : Union[str, Any] = nn.BatchNormad(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : Tensor ):
lowercase_ : Tuple = self.convolution(lowercase_ )
lowercase_ : str = self.normalization(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : int , lowercase_ : int ):
super().__init__()
lowercase_ : int = nn.AdaptiveAvgPoolad((1, 1) )
lowercase_ : int = nn.Sequential(
nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.ReLU() , nn.Convad(lowercase_ , lowercase_ , kernel_size=1 ) , nn.Sigmoid() , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Any ):
# b c h w -> b c 1 1
lowercase_ : List[str] = self.pooler(lowercase_ )
lowercase_ : Optional[int] = self.attention(lowercase_ )
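# Squeeze-and-Excitation: scale every channel of the input by its learned attention weight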
lowercase_ : Any = hidden_state * attention
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[int] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : List[Any] = in_channels != out_channels or stride != 1
lowercase_ : Optional[int] = max(1 , out_channels // config.groups_width )
lowercase_ : Dict = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : List[Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : int = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any ):
lowercase_ : Any = hidden_state
lowercase_ : Union[str, Any] = self.layer(lowercase_ )
lowercase_ : Union[str, Any] = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : str = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Optional[Any] , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 1 ):
super().__init__()
lowercase_ : str = in_channels != out_channels or stride != 1
lowercase_ : int = max(1 , out_channels // config.groups_width )
lowercase_ : int = (
RegNetShortCut(lowercase_ , lowercase_ , stride=lowercase_ ) if should_apply_shortcut else nn.Identity()
)
lowercase_ : Union[str, Any] = nn.Sequential(
RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(lowercase_ , lowercase_ , stride=lowercase_ , groups=lowercase_ , activation=config.hidden_act ) , RegNetSELayer(lowercase_ , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(lowercase_ , lowercase_ , kernel_size=1 , activation=lowercase_ ) , )
lowercase_ : Optional[int] = ACTaFN[config.hidden_act]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
lowercase_ : Optional[int] = hidden_state
lowercase_ : str = self.layer(lowercase_ )
lowercase_ : int = self.shortcut(lowercase_ )
hidden_state += residual
lowercase_ : Optional[Any] = self.activation(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : str , lowercase_ : RegNetConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int = 2 , lowercase_ : int = 2 , ):
super().__init__()
lowercase_ : str = RegNetXLayer if config.layer_type == """x""" else RegNetYLayer
lowercase_ : str = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
lowercase_ , lowercase_ , lowercase_ , stride=lowercase_ , ) , *[layer(lowercase_ , lowercase_ , lowercase_ ) for _ in range(depth - 1 )] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[str] ):
lowercase_ : Tuple = self.layers(lowercase_ )
return hidden_state
class __magic_name__ ( nn.Module):
def __init__( self : Dict , lowercase_ : RegNetConfig ):
super().__init__()
lowercase_ : Optional[Any] = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
lowercase_ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
lowercase_ : Optional[Any] = zip(config.hidden_sizes , config.hidden_sizes[1:] )
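# the remaining stages consume consecutive (in_channels, out_channels) pairs from hidden_sizes, one stage per entry in depths[1:]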
for (in_channels, out_channels), depth in zip(lowercase_ , config.depths[1:] ):
self.stages.append(RegNetStage(lowercase_ , lowercase_ , lowercase_ , depth=lowercase_ ) )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : bool = False , lowercase_ : bool = True ):
lowercase_ : Tuple = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowercase_ : Union[str, Any] = hidden_states + (hidden_state,)
lowercase_ : Dict = stage_module(lowercase_ )
if output_hidden_states:
lowercase_ : Optional[Any] = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=lowercase_ , hidden_states=lowercase_ )
class __magic_name__ ( _UpperCAmelCase):
UpperCamelCase__ = RegNetConfig
UpperCamelCase__ = '''regnet'''
UpperCamelCase__ = '''pixel_values'''
UpperCamelCase__ = True
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Optional[Any] ):
if isinstance(lowercase_ , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode="""fan_out""" , nonlinearity="""relu""" )
elif isinstance(lowercase_ , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : Optional[int] , lowercase_ : Any=False ):
if isinstance(lowercase_ , lowercase_ ):
lowercase_ : List[str] = value
_lowercase : Dict = r"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
_lowercase : Any = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare RegNet model outputting raw features without any specific head on top.''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Any , lowercase_ : Any ):
super().__init__(lowercase_ )
lowercase_ : List[str] = config
lowercase_ : Union[str, Any] = RegNetEmbeddings(lowercase_ )
lowercase_ : Union[str, Any] = RegNetEncoder(lowercase_ )
lowercase_ : str = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Tensor , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None ):
lowercase_ : List[Any] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowercase_ : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : str = self.embedder(lowercase_ )
lowercase_ : Optional[Any] = self.encoder(
lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : List[Any] = encoder_outputs[0]
lowercase_ : str = self.pooler(lowercase_ )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
'''
RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''', _UpperCAmelCase, )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __magic_name__ ( _UpperCAmelCase):
def __init__( self : Dict , lowercase_ : str ):
super().__init__(lowercase_ )
lowercase_ : Any = config.num_labels
lowercase_ : List[str] = RegNetModel(lowercase_ )
# classification head
lowercase_ : Any = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(lowercase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Optional[torch.FloatTensor] = None , lowercase_ : Optional[torch.LongTensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ):
lowercase_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
lowercase_ : Optional[int] = self.regnet(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )
lowercase_ : Optional[int] = outputs.pooler_output if return_dict else outputs[1]
lowercase_ : List[Any] = self.classifier(lowercase_ )
lowercase_ : Optional[int] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
lowercase_ : Optional[int] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
lowercase_ : str = """single_label_classification"""
else:
lowercase_ : str = """multi_label_classification"""
if self.config.problem_type == "regression":
lowercase_ : str = MSELoss()
if self.num_labels == 1:
lowercase_ : List[Any] = loss_fct(logits.squeeze() , labels.squeeze() )
else:
lowercase_ : List[str] = loss_fct(lowercase_ , lowercase_ )
elif self.config.problem_type == "single_label_classification":
lowercase_ : Optional[int] = CrossEntropyLoss()
lowercase_ : Union[str, Any] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
lowercase_ : Dict = BCEWithLogitsLoss()
lowercase_ : Tuple = loss_fct(lowercase_ , lowercase_ )
if not return_dict:
lowercase_ : Tuple = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states )
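# Illustrative usage sketch: a minimal way to drive a RegNet classification head through
# the public transformers API. The checkpoint name and the image path below are
# assumptions chosen for illustration, not values taken from this module.
def _regnet_classification_example():
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    name = "facebook/regnet-y-040"  # assumed public checkpoint
    processor = AutoImageProcessor.from_pretrained(name)
    model = RegNetForImageClassification.from_pretrained(name)
    inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")  # placeholder image
    logits = model(**inputs).logits
    return model.config.id2label[logits.argmax(-1).item()]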
| 30
| 0
|
from collections.abc import Iterable
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] , lowercase_ : Tuple = None ):
lowercase_ : Any = value
lowercase_ : List[Any] = None # Added in order to delete a node easier
lowercase_ : Union[str, Any] = None
lowercase_ : List[Any] = None
def __repr__( self : Optional[int] ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class __magic_name__ :
def __init__( self : Any , lowercase_ : int = None ):
lowercase_ : List[Any] = root
def __str__( self : str ):
return str(self.root )
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : Union[str, Any] , lowercase_ : int ):
if new_children is not None: # reset its kids
lowercase_ : Any = node.parent
if node.parent is not None: # reset its parent
if self.is_right(lowercase_ ): # If it is the right children
lowercase_ : List[str] = new_children
else:
lowercase_ : Optional[Any] = new_children
else:
lowercase_ : List[Any] = new_children
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : Any ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
return self.root is None
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : int ):
lowercase_ : Optional[int] = Node(lowercase_ ) # create a new Node
if self.empty(): # if Tree is empty
lowercase_ : Optional[Any] = new_node # set its root
else: # Tree is not empty
lowercase_ : Optional[int] = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowercase_ : Any = new_node # We insert the new node in a leaf
break
else:
lowercase_ : List[Any] = parent_node.left
else:
if parent_node.right is None:
lowercase_ : Any = new_node
break
else:
lowercase_ : int = parent_node.right
lowercase_ : List[str] = parent_node
def SCREAMING_SNAKE_CASE_ ( self : List[str] , *lowercase_ : List[Any] ):
for value in values:
self.__insert(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , lowercase_ : Dict ):
if self.empty():
raise IndexError("""Warning: Tree is empty! please use another.""" )
else:
lowercase_ : Optional[int] = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value is not value:
lowercase_ : List[Any] = node.left if value < node.value else node.right
return node
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : str = None ):
if node is None:
if self.root is None:
return None
lowercase_ : Optional[Any] = self.root
if not self.empty():
while node.right is not None:
lowercase_ : str = node.right
return node
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : Any = None ):
if node is None:
lowercase_ : Dict = self.root
if self.root is None:
return None
if not self.empty():
lowercase_ : Any = self.root
while node.left is not None:
lowercase_ : int = node.left
return node
def SCREAMING_SNAKE_CASE_ ( self : Tuple , lowercase_ : List[str] ):
lowercase_ : str = self.search(lowercase_ ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(lowercase_ , lowercase_ )
elif node.left is None: # Has only right children
self.__reassign_nodes(lowercase_ , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(lowercase_ , node.left )
else:
lowercase_ : Dict = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowercase_ : Optional[Any] = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def SCREAMING_SNAKE_CASE_ ( self : str , lowercase_ : Optional[int] ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : List[Any]=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
    def inorder ( self , arr : list , node ):
        if node:
            self.inorder(arr , node.left )
            arr.append(node.value )
            self.inorder(arr , node.right )
    def kth_smallest ( self , node , k : int ):
        arr : list = []
        self.inorder(arr , node )  # append all values to list using inorder traversal
        return arr[k - 1]
def postorder ( curr_node : Any ) -> list:
    node_list : list = []
    if curr_node is not None:
        node_list = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
    return node_list
def lowerCamelCase ( ) -> None:
    testlist = (8, 3, 6, 1, 10, 14, 13, 4, 7)
    t = BinarySearchTree()
    for i in testlist:
        t.insert(i )
    # Prints all the elements of the list in order traversal
    print(t )
    if t.search(6 ) is not None:
        print("""The value 6 exists""" )
    else:
        print("""The value 6 doesn't exist""" )
    if t.search(-1 ) is not None:
        print("""The value -1 exists""" )
    else:
        print("""The value -1 doesn't exist""" )
    if not t.empty():
        print("""Max Value: """ , t.get_max().value )  # type: ignore
        print("""Min Value: """ , t.get_min().value )  # type: ignore
    for i in testlist:
        t.remove(i )
    print(t )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : List[Any] = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
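# Illustrative usage sketch: with the lazy module above, the FocalNet classes listed in
# `_import_structure` are only materialised on first attribute access. A minimal entry
# point is sketched below; using a default (randomly initialised) config is an assumption.
def _focalnet_usage_example():
    from transformers import FocalNetConfig, FocalNetModel

    config = FocalNetConfig()      # default configuration, random weights
    model = FocalNetModel(config)  # no pretrained weights are downloaded here
    return model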
| 30
| 0
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : List[Any]=False ) -> Any:
lowercase_ : Optional[int] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''module.blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''module.blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''module.blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''module.blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
lowercase_ : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Optional[Any]=False ) -> List[str]:
for i in range(config.num_hidden_layers ):
if base_model:
lowercase_ : Dict = """"""
else:
lowercase_ : List[str] = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
lowercase_ : Optional[int] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.weight''' )
lowercase_ : Union[str, Any] = state_dict.pop(F'''module.blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
lowercase_ : int = in_proj_weight[
: config.hidden_size, :
]
lowercase_ : Tuple = in_proj_bias[: config.hidden_size]
lowercase_ : str = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowercase_ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
lowercase_ : List[Any] = in_proj_weight[
-config.hidden_size :, :
]
lowercase_ : Dict = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( state_dict : Optional[Any] ) -> None:
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase ( state_dict : dict ) -> None:
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def lowerCamelCase ( dct : dict , old : str , new : str ) -> None:
    val = dct.pop(old )
    dct[new] = val
def lowerCamelCase ( UpperCAmelCase__ : Any , UpperCAmelCase__ : Optional[int] ) -> Any:
lowercase_ : str = ViTMSNConfig()
lowercase_ : Tuple = 1000
lowercase_ : Union[str, Any] = """datasets/huggingface/label-files"""
lowercase_ : int = """imagenet-1k-id2label.json"""
lowercase_ : List[str] = json.load(open(hf_hub_download(_A , _A ) , """r""" ) )
lowercase_ : Dict = {int(_A ): v for k, v in idalabel.items()}
lowercase_ : str = idalabel
lowercase_ : str = {v: k for k, v in idalabel.items()}
if "s16" in checkpoint_url:
lowercase_ : List[Any] = 384
lowercase_ : List[str] = 1536
lowercase_ : Dict = 6
elif "l16" in checkpoint_url:
lowercase_ : Optional[Any] = 1024
lowercase_ : List[str] = 4096
lowercase_ : Tuple = 24
lowercase_ : Optional[Any] = 16
lowercase_ : str = 0.1
elif "b4" in checkpoint_url:
lowercase_ : Optional[int] = 4
elif "l7" in checkpoint_url:
lowercase_ : List[str] = 7
lowercase_ : int = 1024
lowercase_ : Tuple = 4096
lowercase_ : Tuple = 24
lowercase_ : Union[str, Any] = 16
lowercase_ : str = 0.1
lowercase_ : Tuple = ViTMSNModel(_A )
lowercase_ : List[Any] = torch.hub.load_state_dict_from_url(_A , map_location="""cpu""" )["""target_encoder"""]
lowercase_ : Tuple = ViTImageProcessor(size=config.image_size )
remove_projection_head(_A )
lowercase_ : str = create_rename_keys(_A , base_model=_A )
for src, dest in rename_keys:
rename_key(_A , _A , _A )
read_in_q_k_v(_A , _A , base_model=_A )
model.load_state_dict(_A )
model.eval()
lowercase_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
lowercase_ : Any = Image.open(requests.get(_A , stream=_A ).raw )
lowercase_ : List[str] = ViTImageProcessor(
size=config.image_size , image_mean=_A , image_std=_A )
lowercase_ : Tuple = image_processor(images=_A , return_tensors="""pt""" )
# forward pass
torch.manual_seed(2 )
lowercase_ : Dict = model(**_A )
lowercase_ : Union[str, Any] = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase_ : Tuple = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
lowercase_ : int = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
lowercase_ : Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
lowercase_ : Optional[Any] = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
lowercase_ : str = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _A , atol=1e-4 )
print(F'''Saving model to {pytorch_dump_folder_path}''' )
model.save_pretrained(_A )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(_A )
if __name__ == "__main__":
_lowercase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
type=str,
help="URL of the checkpoint you\'d like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowercase : int = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
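# Illustrative follow-up sketch: once the ViT-MSN checkpoint has been converted and saved,
# it can be reloaded with the regular transformers API. The folder path is a placeholder.
def _load_converted_vit_msn_example(dump_folder="path/to/pytorch_dump_folder"):
    from transformers import ViTImageProcessor, ViTMSNModel

    image_processor = ViTImageProcessor.from_pretrained(dump_folder)
    model = ViTMSNModel.from_pretrained(dump_folder)
    return image_processor, model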
| 708
|
'''simple docstring'''
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize("""dataset_size""" , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize("""input_in_memory_max_size""" , ["""default""", 0, 100 * 2**20, 900 * 2**20] )
def lowerCamelCase ( UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str ) -> Any:
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , """IN_MEMORY_MAX_SIZE""" , UpperCAmelCase__ )
lowercase_ : List[Any] = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
lowercase_ : str = dataset_size < in_memory_max_size
else:
lowercase_ : List[Any] = False
lowercase_ : Any = is_small_dataset(UpperCAmelCase__ )
assert result == expected
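# Illustrative sketch: `is_small_dataset` simply compares the dataset size against
# `datasets.config.IN_MEMORY_MAX_SIZE` (0 by default, so nothing is considered small).
# The 500 MiB cap below is an arbitrary value chosen for the demo.
def _is_small_dataset_example():
    datasets.config.IN_MEMORY_MAX_SIZE = 500 * 2**20  # assumed cap, for the demo only
    return is_small_dataset(100 * 2**20)  # True: 100 MiB < 500 MiB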
| 30
| 0
|
'''simple docstring'''
import numpy as np
import datasets
_lowercase : int = '\nCompute the Mahalanobis Distance\n\nMahalanobis distance is the distance between a point and a distribution,\nnot between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since.\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'
_lowercase : Optional[Any] = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'
_lowercase : Union[str, Any] = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class __magic_name__ ( datasets.Metric):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""X""": datasets.Sequence(datasets.Value("""float""" , id="""sequence""" ) , id="""X""" ),
} ) , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : List[str] ):
lowercase_ : Optional[int] = np.array(UpperCamelCase__ )
lowercase_ : List[str] = np.array(UpperCamelCase__ )
# Assert that arrays are 2D
if len(X.shape ) != 2:
raise ValueError("""Expected `X` to be a 2D vector""" )
if len(reference_distribution.shape ) != 2:
raise ValueError("""Expected `reference_distribution` to be a 2D vector""" )
if reference_distribution.shape[0] < 2:
raise ValueError(
"""Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension""" )
# Get mahalanobis distance for each prediction
lowercase_ : Optional[Any] = X - np.mean(UpperCamelCase__ )
lowercase_ : Optional[Any] = np.cov(reference_distribution.T )
try:
lowercase_ : Tuple = np.linalg.inv(UpperCamelCase__ )
except np.linalg.LinAlgError:
lowercase_ : Any = np.linalg.pinv(UpperCamelCase__ )
lowercase_ : Optional[int] = np.dot(UpperCamelCase__ , UpperCamelCase__ )
lowercase_ : Any = np.dot(UpperCamelCase__ , X_minus_mu.T ).diagonal()
return {"mahalanobis": mahal_dist}
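# Illustrative worked example: the same distance computed directly with numpy, mirroring
# the steps above. The toy inputs match the docstring example; the singular toy covariance
# is inverted with the pseudo-inverse, which is also what the fallback above ends up doing.
def _mahalanobis_by_hand_example():
    X = np.array([[0, 1]])
    reference = np.array([[0, 1], [1, 0]])
    delta = X - np.mean(reference)                  # centre against the reference mean
    cov_inv = np.linalg.pinv(np.cov(reference.T))   # (pseudo-)inverse covariance
    return np.dot(np.dot(delta, cov_inv), delta.T).diagonal()  # array([0.5])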
| 709
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class __magic_name__ ( unittest.TestCase):
@slow
def SCREAMING_SNAKE_CASE_ ( self : str ):
lowercase_ : Any = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
lowercase_ : Dict = AutoTokenizer.from_pretrained("""google/mt5-small""" )
lowercase_ : Union[str, Any] = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
lowercase_ : List[str] = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
lowercase_ : Optional[Any] = model(lowercase_ , labels=lowercase_ ).loss
lowercase_ : Optional[int] = -tf.math.reduce_mean(lowercase_ ).numpy()
lowercase_ : Optional[int] = -21.22_81_68
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
| 30
| 0
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase : Optional[Any] = logging.get_logger(__name__)
def lowerCamelCase ( UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Tuple=False ) -> Union[str, Any]:
lowercase_ : str = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
lowercase_ : Optional[Any] = 'segformer.encoder.' + key
if key.startswith("""backbone""" ):
lowercase_ : int = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
lowercase_ : Any = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
lowercase_ : Dict = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(UpperCAmelCase__ )-1}''' )
if "norm" in key:
lowercase_ : List[str] = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
lowercase_ : Optional[Any] = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
lowercase_ : Optional[int] = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(UpperCAmelCase__ )-1}''' )
if "layer_norm1" in key:
lowercase_ : str = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
lowercase_ : List[str] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
lowercase_ : Any = key[key.find("""block""" ) + len("""block""" )]
lowercase_ : Optional[Any] = key.replace(F'''block{idx}''' , F'''block.{int(UpperCAmelCase__ )-1}''' )
if "attn.q" in key:
lowercase_ : int = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
lowercase_ : str = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
lowercase_ : Optional[Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
lowercase_ : Optional[int] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
lowercase_ : List[str] = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
lowercase_ : str = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
lowercase_ : List[Any] = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
lowercase_ : Optional[Any] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
lowercase_ : Union[str, Any] = key[key.find("""linear_c""" ) + len("""linear_c""" )]
lowercase_ : Dict = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(UpperCAmelCase__ )-1}''' )
if key.startswith("""head""" ):
lowercase_ : Dict = key.replace("""head""" , """classifier""" )
lowercase_ : int = value
return new_state_dict
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
# for each of the encoder blocks:
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
lowercase_ : Tuple = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.weight''' )
lowercase_ : Union[str, Any] = state_dict.pop(F'''segformer.encoder.block.{i}.{j}.attention.self.kv.bias''' )
# next, add keys and values (in that order) to the state dict
lowercase_ : Tuple = kv_weight[
: config.hidden_sizes[i], :
]
lowercase_ : List[str] = kv_bias[: config.hidden_sizes[i]]
lowercase_ : Optional[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
lowercase_ : Tuple = kv_bias[
config.hidden_sizes[i] :
]
def lowerCamelCase ( ) -> Image.Image:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[str] ) -> str:
lowercase_ : Optional[int] = SegformerConfig()
lowercase_ : Optional[int] = False
# set attributes based on model_name
lowercase_ : List[Any] = 'huggingface/label-files'
if "segformer" in model_name:
lowercase_ : Tuple = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
lowercase_ : Any = 150
lowercase_ : List[Any] = 'ade20k-id2label.json'
lowercase_ : List[Any] = (1, 150, 128, 128)
elif "city" in model_name:
lowercase_ : Union[str, Any] = 19
lowercase_ : Optional[int] = 'cityscapes-id2label.json'
lowercase_ : List[Any] = (1, 19, 128, 128)
else:
raise ValueError(F'''Model {model_name} not supported''' )
elif "mit" in model_name:
lowercase_ : Union[str, Any] = True
lowercase_ : List[Any] = model_name[4:6]
lowercase_ : Optional[Any] = 1000
lowercase_ : Tuple = 'imagenet-1k-id2label.json'
lowercase_ : str = (1, 1000)
else:
raise ValueError(F'''Model {model_name} not supported''' )
# set config attributes
lowercase_ : List[str] = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type="""dataset""" ) , """r""" ) )
lowercase_ : Optional[Any] = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
lowercase_ : Dict = idalabel
lowercase_ : Dict = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
lowercase_ : str = [64, 128, 320, 512]
lowercase_ : Dict = 256
elif size == "b2":
lowercase_ : Optional[Any] = [64, 128, 320, 512]
lowercase_ : List[Any] = 768
lowercase_ : Any = [3, 4, 6, 3]
elif size == "b3":
lowercase_ : Optional[int] = [64, 128, 320, 512]
lowercase_ : int = 768
lowercase_ : int = [3, 4, 18, 3]
elif size == "b4":
lowercase_ : int = [64, 128, 320, 512]
lowercase_ : Tuple = 768
lowercase_ : List[Any] = [3, 8, 27, 3]
elif size == "b5":
lowercase_ : Dict = [64, 128, 320, 512]
lowercase_ : Union[str, Any] = 768
lowercase_ : Optional[int] = [3, 6, 40, 3]
else:
raise ValueError(F'''Size {size} not supported''' )
# load image processor (only resize + normalize)
lowercase_ : Optional[int] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCAmelCase__ , align=UpperCAmelCase__ , do_random_crop=UpperCAmelCase__ )
# prepare image
lowercase_ : str = prepare_img()
lowercase_ : Optional[int] = image_processor(images=UpperCAmelCase__ , return_tensors="""pt""" ).pixel_values
logger.info(F'''Converting model {model_name}...''' )
# load original state dict
if encoder_only:
lowercase_ : List[Any] = torch.load(UpperCAmelCase__ , map_location=torch.device("""cpu""" ) )
else:
lowercase_ : int = torch.load(UpperCAmelCase__ , map_location=torch.device("""cpu""" ) )['state_dict']
# rename keys
lowercase_ : str = rename_keys(UpperCAmelCase__ , encoder_only=UpperCAmelCase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCAmelCase__ , UpperCAmelCase__ )
# create HuggingFace model and load state dict
if encoder_only:
lowercase_ : List[Any] = False
lowercase_ : Optional[Any] = SegformerForImageClassification(UpperCAmelCase__ )
else:
lowercase_ : List[str] = SegformerForSemanticSegmentation(UpperCAmelCase__ )
model.load_state_dict(UpperCAmelCase__ )
model.eval()
# forward pass
lowercase_ : str = model(UpperCAmelCase__ )
lowercase_ : Optional[Any] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
lowercase_ : int = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
lowercase_ : Tuple = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
lowercase_ : Optional[Any] = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
lowercase_ : int = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
lowercase_ : Union[str, Any] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
lowercase_ : int = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
lowercase_ : List[str] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
lowercase_ : Optional[Any] = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
lowercase_ : Dict = torch.tensor(
[
[
[-1.1372e01, -1.2787e01, -1.3477e01],
[-1.2536e01, -1.4194e01, -1.4409e01],
[-1.3217e01, -1.4888e01, -1.5327e01],
],
[
[-1.4791e01, -1.7122e01, -1.8277e01],
[-1.7163e01, -1.9192e01, -1.9533e01],
[-1.7897e01, -1.9991e01, -2.0315e01],
],
[
[7.6723e-01, 4.1921e-01, -7.7878e-02],
[4.7772e-01, 9.5557e-03, -2.8082e-01],
[3.6032e-01, -2.4826e-01, -5.1168e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
lowercase_ : Optional[Any] = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
lowercase_ : List[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
lowercase_ : List[str] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
lowercase_ : Optional[int] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
lowercase_ : Tuple = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
lowercase_ : Tuple = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("""Predicted class:""" , model.config.id2label[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCAmelCase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(UpperCAmelCase__ ).mkdir(exist_ok=UpperCAmelCase__ )
model.save_pretrained(UpperCAmelCase__ )
image_processor.save_pretrained(UpperCAmelCase__ )
if __name__ == "__main__":
_lowercase : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
_lowercase : List[str] = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
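# Illustrative follow-up sketch: running an already-converted checkpoint end to end.
# The Hub checkpoint name is an assumption (a public ADE20k-finetuned SegFormer-B0);
# the test image is the same COCO picture used elsewhere in this script.
def _segformer_inference_example():
    name = "nvidia/segformer-b0-finetuned-ade-512-512"  # assumed public checkpoint
    image_processor = SegformerImageProcessor.from_pretrained(name)
    model = SegformerForSemanticSegmentation.from_pretrained(name)
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, num_labels, height / 4, width / 4)
    return logits.argmax(dim=1)  # per-pixel class indices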
| 710
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def lowerCamelCase ( ode_func : Callable , y0 : float , x0 : float , step_size : float , x_end : float ) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = y0
    x = x0
    for k in range(n ):
        y_predict = y[k] + step_size * ode_func(x , y[k] )
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x , y[k] ) + ode_func(x + step_size , y_predict ))
        )
        x += step_size
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
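# Illustrative worked example: solving y' = y with y(0) = 1 on [0, 1] using the
# improved-Euler routine above (called here by the name it has in this file).
# The exact solution at x = 1 is e, roughly 2.71828; a step of 0.01 lands close to it.
def _improved_euler_example():
    y = lowerCamelCase(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
    return y[-1]  # approximately 2.7182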
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( number : int ) -> bool:
    if number < 0:
        raise ValueError("""number must not be negative""" )
    return number & (number - 1) == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
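# Illustrative worked example: the bit trick works because a power of two has exactly one
# set bit, so subtracting 1 flips every bit below it and the AND comes out zero.
def _power_of_two_examples():
    # 16 = 0b10000 and 15 = 0b01111 -> 16 & 15 == 0, so 16 is a power of two
    # 18 = 0b10010 and 17 = 0b10001 -> 18 & 17 == 0b10000 != 0, so 18 is not
    return [lowerCamelCase(n) for n in (1, 16, 18)]  # [True, True, False]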
| 711
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Dict = (1 - _cos) / 2
lowercase_ : Optional[int] = 1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : List[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : Optional[int] = sin(UpperCAmelCase__ )
lowercase_ : Dict = cos(UpperCAmelCase__ )
lowercase_ : Optional[int] = _sin / (2 * q_factor)
lowercase_ : Dict = (1 + _cos) / 2
lowercase_ : str = -1 - _cos
lowercase_ : Dict = 1 + alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : List[Any] = 1 - alpha
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : int = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : Union[str, Any] = cos(UpperCAmelCase__ )
lowercase_ : str = _sin / (2 * q_factor)
lowercase_ : str = _sin / 2
lowercase_ : Any = 0
lowercase_ : Optional[Any] = -ba
lowercase_ : Dict = 1 + alpha
lowercase_ : Union[str, Any] = -2 * _cos
lowercase_ : Union[str, Any] = 1 - alpha
lowercase_ : Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 1 / sqrt(2 ) ) -> IIRFilter:
lowercase_ : List[str] = tau * frequency / samplerate
lowercase_ : Any = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : Optional[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 1 - alpha
lowercase_ : Optional[Any] = -2 * _cos
lowercase_ : Optional[int] = 1 + alpha
lowercase_ : Dict = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Tuple = sin(UpperCAmelCase__ )
lowercase_ : List[Any] = cos(UpperCAmelCase__ )
lowercase_ : List[Any] = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : List[str] = 1 + alpha * big_a
lowercase_ : List[Any] = -2 * _cos
lowercase_ : Dict = 1 - alpha * big_a
lowercase_ : str = 1 + alpha / big_a
lowercase_ : List[str] = -2 * _cos
lowercase_ : Tuple = 1 - alpha / big_a
lowercase_ : Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : Dict = tau * frequency / samplerate
lowercase_ : Union[str, Any] = sin(UpperCAmelCase__ )
lowercase_ : Any = cos(UpperCAmelCase__ )
lowercase_ : Any = _sin / (2 * q_factor)
lowercase_ : Any = 10 ** (gain_db / 40)
lowercase_ : Any = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : int = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : Optional[Any] = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : int = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (pmc + aaa)
lowercase_ : List[str] = 2 * big_a * mpc
lowercase_ : Union[str, Any] = big_a * (pmc - aaa)
lowercase_ : Optional[int] = ppmc + aaa
lowercase_ : Optional[int] = -2 * pmpc
lowercase_ : Any = ppmc - aaa
lowercase_ : Optional[int] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : int , UpperCAmelCase__ : float , UpperCAmelCase__ : float = 1 / sqrt(2 ) , ) -> IIRFilter:
lowercase_ : str = tau * frequency / samplerate
lowercase_ : int = sin(UpperCAmelCase__ )
lowercase_ : int = cos(UpperCAmelCase__ )
lowercase_ : Dict = _sin / (2 * q_factor)
lowercase_ : Union[str, Any] = 10 ** (gain_db / 40)
lowercase_ : Union[str, Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase_ : Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase_ : Any = (big_a - 1) - (big_a + 1) * _cos
lowercase_ : str = (big_a - 1) + (big_a + 1) * _cos
lowercase_ : Optional[int] = 2 * sqrt(UpperCAmelCase__ ) * alpha
lowercase_ : Tuple = big_a * (ppmc + aaa)
lowercase_ : List[Any] = -2 * big_a * pmpc
lowercase_ : Optional[Any] = big_a * (ppmc - aaa)
lowercase_ : Optional[Any] = pmc + aaa
lowercase_ : int = 2 * mpc
lowercase_ : Tuple = pmc - aaa
lowercase_ : Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
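# Illustrative usage sketch: building a 1 kHz low-pass biquad for 48 kHz audio and running
# samples through it. The factory name `make_lowpass` and the `process()` method are the
# names used by the upstream audio_filters package and are assumptions here.
def _lowpass_usage_example():
    filt = make_lowpass(1000, 48000)           # assumed factory name (first function above)
    samples = [0.0, 1.0, 0.5, -0.25]
    return [filt.process(s) for s in samples]  # assumed IIRFilter.process(sample) API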
| 30
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
class __magic_name__ :
def __init__( self : Optional[Any] , lowercase_ : int ):
lowercase_ : List[Any] = num_of_nodes
lowercase_ : list[list[int]] = []
lowercase_ : dict[int, int] = {}
def SCREAMING_SNAKE_CASE_ ( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowercase_ : Tuple = self.find_component(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : list[int] , lowercase_ : int , lowercase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowercase_ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCamelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowercase_ : Optional[int] = self.find_component(lowerCamelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCamelCase_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
lowercase_ : int = []
lowercase_ : Optional[int] = 0
lowercase_ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowercase_ : Optional[Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowercase_ : Tuple = edge
lowercase_ : Dict = self.m_component[u]
lowercase_ : Tuple = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowercase_ : Optional[Any] = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowercase_ : str = edge
lowercase_ : Dict = self.m_component[u]
lowercase_ : List[str] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
lowercase_ : List[Any] = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def lowerCamelCase ( ) -> Optional[Any]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
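# Illustrative usage sketch: how a graph utility like the one above is typically driven.
# The method names add_edge / boruvka follow the upstream, un-obfuscated version of this
# module and are assumptions here.
def _boruvka_usage_example():
    graph = __magic_name__(3)    # the graph class defined above, with 3 nodes
    graph.add_edge(0, 1, 5)      # assumed signature: (u_node, v_node, weight)
    graph.add_edge(1, 2, 10)
    graph.add_edge(2, 0, 1)
    graph.boruvka()              # assumed entry point: prints the chosen edges and total weight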
| 712
|
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
_lowercase : str = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class __magic_name__ ( datasets.BuilderConfig):
UpperCamelCase__ = None
def lowerCamelCase ( UpperCAmelCase__ : "pyspark.sql.DataFrame" , UpperCAmelCase__ : List[int] , ) -> str:
import pyspark
def generate_fn():
lowercase_ : List[str] = df.select("""*""" , pyspark.sql.functions.spark_partition_id().alias("""part_id""" ) )
for partition_id in partition_order:
lowercase_ : int = df_with_partition_id.select("""*""" ).where(F'''part_id = {partition_id}''' ).drop("""part_id""" )
lowercase_ : Any = partition_df.collect()
lowercase_ : Dict = 0
for row in rows:
yield F'''{partition_id}_{row_id}''', row.asDict()
row_id += 1
return generate_fn
class __magic_name__ ( _BaseExamplesIterable):
def __init__( self : int , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : Optional[int]=None , ):
lowercase_ : Dict = df
lowercase_ : Optional[Any] = partition_order or range(self.df.rdd.getNumPartitions() )
lowercase_ : Optional[Any] = _generate_iterable_examples(self.df , self.partition_order )
def __iter__( self : List[Any] ):
yield from self.generate_examples_fn()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : np.random.Generator ):
lowercase_ : str = list(range(self.df.rdd.getNumPartitions() ) )
generator.shuffle(lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , lowercase_ : int , lowercase_ : int ):
lowercase_ : str = self.split_shard_indices_by_worker(lowercase_ , lowercase_ )
return SparkExamplesIterable(self.df , partition_order=lowercase_ )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
return len(self.partition_order )
class __magic_name__ ( datasets.DatasetBuilder):
UpperCamelCase__ = SparkConfig
def __init__( self : Tuple , lowercase_ : "pyspark.sql.DataFrame" , lowercase_ : str = None , lowercase_ : str = None , **lowercase_ : str , ):
import pyspark
lowercase_ : str = pyspark.sql.SparkSession.builder.getOrCreate()
lowercase_ : Optional[int] = df
lowercase_ : List[str] = working_dir
super().__init__(
cache_dir=lowercase_ , config_name=str(self.df.semanticHash() ) , **lowercase_ , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
# Returns the path of the created file.
def create_cache_and_write_probe(lowercase_ : str ):
# makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
# already exist.
os.makedirs(self._cache_dir , exist_ok=lowercase_ )
            lowercase_ : List[str] = os.path.join(self._cache_dir , """fs_test""" + uuid.uuid4().hex )
# Opening the file in append mode will create a new file unless it already exists, in which case it will not
# change the file contents.
open(lowercase_ , """a""" )
return [probe_file]
if self._spark.conf.get("""spark.master""" , """""" ).startswith("""local""" ):
return
# If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
# accessible to the driver.
# TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
if self._cache_dir:
lowercase_ : str = (
self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(lowercase_ ).collect()
)
if os.path.isfile(probe[0] ):
return
raise ValueError(
"""When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir""" )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
return datasets.DatasetInfo(features=self.config.features )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , lowercase_ : datasets.download.download_manager.DownloadManager ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN )]
def SCREAMING_SNAKE_CASE_ ( self : List[str] , lowercase_ : Union[str, Any] ):
import pyspark
def get_arrow_batch_size(lowercase_ : Any ):
for batch in it:
yield pa.RecordBatch.from_pydict({"""batch_bytes""": [batch.nbytes]} )
lowercase_ : Union[str, Any] = self.df.count()
lowercase_ : Union[str, Any] = df_num_rows if df_num_rows <= 100 else 100
# Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
lowercase_ : Any = (
self.df.limit(lowercase_ )
.repartition(1 )
.mapInArrow(lowercase_ , """batch_bytes: long""" )
.agg(pyspark.sql.functions.sum("""batch_bytes""" ).alias("""sample_bytes""" ) )
.collect()[0]
.sample_bytes
/ sample_num_rows
)
lowercase_ : List[Any] = approx_bytes_per_row * df_num_rows
if approx_total_size > max_shard_size:
# Make sure there is at least one row per partition.
lowercase_ : Any = min(lowercase_ , int(approx_total_size / max_shard_size ) )
lowercase_ : Any = self.df.repartition(lowercase_ )
def SCREAMING_SNAKE_CASE_ ( self : Any , lowercase_ : str , lowercase_ : str , lowercase_ : int , ):
import pyspark
lowercase_ : Any = ParquetWriter if file_format == """parquet""" else ArrowWriter
lowercase_ : Dict = os.path.join(self._working_dir , os.path.basename(lowercase_ ) ) if self._working_dir else fpath
lowercase_ : Optional[Any] = file_format == """parquet"""
# Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
# pickling the SparkContext.
lowercase_ : Tuple = self.config.features
lowercase_ : Any = self._writer_batch_size
lowercase_ : List[str] = self._fs.storage_options
def write_arrow(lowercase_ : str ):
# Within the same SparkContext, no two task attempts will share the same attempt ID.
lowercase_ : List[str] = pyspark.TaskContext().taskAttemptId()
lowercase_ : Dict = next(lowercase_ , lowercase_ )
if first_batch is None:
# Some partitions might not receive any data.
return pa.RecordBatch.from_arrays(
[[task_id], [0], [0]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
lowercase_ : int = 0
lowercase_ : List[Any] = writer_class(
features=lowercase_ , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : Optional[Any] = pa.Table.from_batches([first_batch] )
writer.write_table(lowercase_ )
for batch in it:
if max_shard_size is not None and writer._num_bytes >= max_shard_size:
lowercase_ , lowercase_ : Dict = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
shard_id += 1
lowercase_ : Any = writer_class(
features=writer._features , path=working_fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , writer_batch_size=lowercase_ , storage_options=lowercase_ , embed_local_files=lowercase_ , )
lowercase_ : List[str] = pa.Table.from_batches([batch] )
writer.write_table(lowercase_ )
if writer._num_bytes > 0:
lowercase_ , lowercase_ : str = writer.finalize()
writer.close()
yield pa.RecordBatch.from_arrays(
[[task_id], [num_examples], [num_bytes]] , names=["""task_id""", """num_examples""", """num_bytes"""] , )
if working_fpath != fpath:
for file in os.listdir(os.path.dirname(lowercase_ ) ):
lowercase_ : Optional[Any] = os.path.join(os.path.dirname(lowercase_ ) , os.path.basename(lowercase_ ) )
shutil.move(lowercase_ , lowercase_ )
lowercase_ : Union[str, Any] = (
self.df.mapInArrow(lowercase_ , """task_id: long, num_examples: long, num_bytes: long""" )
.groupBy("""task_id""" )
.agg(
pyspark.sql.functions.sum("""num_examples""" ).alias("""total_num_examples""" ) , pyspark.sql.functions.sum("""num_bytes""" ).alias("""total_num_bytes""" ) , pyspark.sql.functions.count("""num_bytes""" ).alias("""num_shards""" ) , pyspark.sql.functions.collect_list("""num_examples""" ).alias("""shard_lengths""" ) , )
.collect()
)
for row in stats:
yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
def SCREAMING_SNAKE_CASE_ ( self : Dict , lowercase_ : "datasets.SplitGenerator" , lowercase_ : str = "arrow" , lowercase_ : Optional[Union[str, int]] = None , lowercase_ : Optional[int] = None , **lowercase_ : List[str] , ):
self._validate_cache_dir()
lowercase_ : int = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE )
self._repartition_df_if_needed(lowercase_ )
lowercase_ : Tuple = not is_remote_filesystem(self._fs )
lowercase_ : int = os.path.join if is_local else posixpath.join
lowercase_ : Dict = """-TTTTT-SSSSS-of-NNNNN"""
lowercase_ : Dict = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}'''
lowercase_ : Optional[int] = path_join(self._output_dir , lowercase_ )
lowercase_ : Any = 0
lowercase_ : Tuple = 0
lowercase_ : int = 0
lowercase_ : Dict = []
lowercase_ : Union[str, Any] = []
for task_id, content in self._prepare_split_single(lowercase_ , lowercase_ , lowercase_ ):
(
(
lowercase_
) , (
lowercase_
) , (
lowercase_
) , (
lowercase_
) ,
) : Union[str, Any] = content
if num_bytes > 0:
total_num_examples += num_examples
total_num_bytes += num_bytes
total_shards += num_shards
task_id_and_num_shards.append((task_id, num_shards) )
all_shard_lengths.extend(lowercase_ )
lowercase_ : List[str] = total_num_examples
lowercase_ : int = total_num_bytes
# should rename everything at the end
logger.debug(f'''Renaming {total_shards} shards.''' )
if total_shards > 1:
lowercase_ : Tuple = all_shard_lengths
# Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a
# pickling error due to pickling the SparkContext.
lowercase_ : Dict = self._fs
# use the -SSSSS-of-NNNNN pattern
def _rename_shard(
lowercase_ : int , lowercase_ : int , lowercase_ : int , ):
rename(
lowercase_ , fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace("""TTTTT-SSSSS""" , f'''{global_shard_id:05d}''' ).replace("""NNNNN""" , f'''{total_shards:05d}''' ) , )
lowercase_ : Union[str, Any] = []
lowercase_ : Tuple = 0
for i in range(len(lowercase_ ) ):
lowercase_ , lowercase_ : List[Any] = task_id_and_num_shards[i]
for shard_id in range(lowercase_ ):
args.append([task_id, shard_id, global_shard_id] )
global_shard_id += 1
self._spark.sparkContext.parallelize(lowercase_ , len(lowercase_ ) ).map(lambda lowercase_ : _rename_shard(*lowercase_ ) ).collect()
else:
# don't use any pattern
lowercase_ : List[str] = 0
lowercase_ : Optional[Any] = task_id_and_num_shards[0][0]
self._rename(
fpath.replace("""SSSSS""" , f'''{shard_id:05d}''' ).replace("""TTTTT""" , f'''{task_id:05d}''' ) , fpath.replace(lowercase_ , """""" ) , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , lowercase_ : "datasets.SplitGenerator" , ):
return SparkExamplesIterable(self.df )
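# Illustrative sketch (not part of the builder above): how the "-TTTTT-SSSSS-of-NNNNN"
# filename template used for shard renaming resolves. The path and counts below are
# hypothetical example values, not taken from a real run.
_example_fpath = "my_dataset-train-TTTTT-SSSSS-of-NNNNN.arrow"
_task_id, _shard_id, _global_shard_id, _total_shards = 3, 7, 10, 42
_source = _example_fpath.replace("SSSSS", f"{_shard_id:05d}").replace("TTTTT", f"{_task_id:05d}")
# -> "my_dataset-train-00003-00007-of-NNNNN.arrow" (per-task shard written by a worker)
_target = _example_fpath.replace("TTTTT-SSSSS", f"{_global_shard_id:05d}").replace("NNNNN", f"{_total_shards:05d}")
# -> "my_dataset-train-00010-of-00042.arrow" (final, globally numbered shard)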
| 30
| 0
|
'''simple docstring'''
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_lowercase : str = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def lowerCamelCase ( UpperCAmelCase__ : Optional[int] ) -> Any:
lowercase_ : Optional[Any] = list(s_dict.keys() )
for key in keys:
lowercase_ : List[str] = R""".*/layers_(\d+)"""
lowercase_ : Optional[Any] = key
if re.match(A__ , A__ ):
lowercase_ : Optional[int] = re.sub(R"""layers_(\d+)""" , R"""block/\1/layer""" , A__ )
lowercase_ : Optional[int] = R"""(encoder|decoder)\/"""
if re.match(A__ , A__ ):
lowercase_ : List[str] = re.match(A__ , A__ ).groups()
if groups[0] == "encoder":
lowercase_ : List[Any] = re.sub(R"""/mlp/""" , R"""/1/mlp/""" , A__ )
lowercase_ : Tuple = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/1/layer_norm/""" , A__ )
elif groups[0] == "decoder":
lowercase_ : Tuple = re.sub(R"""/mlp/""" , R"""/2/mlp/""" , A__ )
lowercase_ : str = re.sub(R"""/pre_mlp_layer_norm/""" , R"""/2/layer_norm/""" , A__ )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowercase_ : Dict = new_key.replace(A__ , A__ )
print(F'''{key} -> {new_key}''' )
lowercase_ : int = s_dict.pop(A__ )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowercase_ : str = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowercase_ : Optional[Any] = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowercase_ : Tuple = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(A__ ):
                lowercase_ : int = expert_weights[idx]
print(F'''{key} -> {key.replace('expert/' , 'nested fstring' )}''' )
s_dict.pop(A__ )
return s_dict
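# Worked example (illustrative; the key below is an assumed T5X parameter name, not one
# read from a checkpoint): how a raw name passes through the regex and mapping stages above.
#   "encoder/layers_0/attention/query/kernel"
#     -> "encoder/block/0/layer/attention/query/kernel"    (layers_N -> block/N/layer)
#     -> "encoder/block/0/layer/0/SelfAttention/q/kernel"  (MOE_LAYER_NAME_MAPPING pass)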
_lowercase : Any = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def lowerCamelCase ( UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple ) -> List[Any]:
import regex as re
with open(A__ , """r""" ) as f:
lowercase_ : Optional[int] = f.read()
lowercase_ : Tuple = re.findall(R"""(.*) = ([0-9.]*)""" , A__ )
lowercase_ : str = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowercase_ : Optional[int] = float(A__ ) if """.""" in value else int(A__ )
lowercase_ : Any = re.findall(R"""(.*activations) = \(\'(.*)\',\)""" , A__ )[0]
lowercase_ : str = str(activation[1] )
lowercase_ : Union[str, Any] = num_experts
lowercase_ : Union[str, Any] = SwitchTransformersConfig(**A__ )
return config
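# Illustrative sketch (assumed gin file contents): the two regexes above turn lines like
# these into SwitchTransformersConfig fields via GIN_TO_CONFIG_MAPPING.
#   NUM_HEADS = 12                           -> num_heads=12
#   MLP_DIM = 3072                           -> d_ff=3072
#   dense.MlpBlock.activations = ('gelu',)   -> feed_forward_proj="gelu"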
def lowerCamelCase ( UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Dict=None , UpperCAmelCase__ : List[Any]="./" , UpperCAmelCase__ : Optional[int]=8 ) -> Union[str, Any]:
print(F'''Loading flax weights from : {flax_checkpoint_path}''' )
    lowercase_ : str = checkpoints.load_t5x_checkpoint(A__ )
if gin_file is not None:
lowercase_ : Dict = convert_gin_to_config(A__ , A__ )
else:
lowercase_ : List[str] = SwitchTransformersConfig.from_pretrained(A__ )
lowercase_ : Optional[Any] = SwitchTransformersForConditionalGeneration(A__ )
lowercase_ : List[Any] = flax_params["""target"""]
lowercase_ : int = flatten_dict(A__ , sep="""/""" )
lowercase_ : Tuple = rename_keys(A__ )
lowercase_ : Optional[Any] = unflatten_dict(A__ , sep="""/""" )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(A__ , A__ )
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
pt_model.save_pretrained(A__ )
if __name__ == "__main__":
_lowercase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
_lowercase : List[str] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
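# Example invocation (hypothetical paths and script filename, for illustration only):
#   python convert_switch_checkpoint.py \
#       --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --gin_file /path/to/operative_config.gin \
#       --pytorch_dump_folder_path ./switch-base-8 \
#       --num_experts 8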
| 713
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowercase : Dict = {
"configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = [
"BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
"BloomForCausalLM",
"BloomModel",
"BloomPreTrainedModel",
"BloomForSequenceClassification",
"BloomForTokenClassification",
"BloomForQuestionAnswering",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
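# Usage sketch (assumption): with the _LazyModule above in place, submodules are imported
# only on first attribute access, so e.g. the import below does not pull in torch until a
# torch-backed class is actually used.
#   from transformers.models.bloom import BloomConfig  # resolves lazily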
| 30
| 0
|
'''simple docstring'''
def lowerCamelCase ( first : int , second : int ) -> int:
    # Bitwise addition: XOR gives the partial sum, AND + shift propagates the carry.
    while second != 0:
        lowercase_ : int = first & second  # carry bits shared by both operands
        first ^= second  # partial sum without the carry
        second = lowercase_ << 1  # shift the carry left for the next iteration
    return first
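# Worked trace (illustrative): 5 + 3 with the carry-and-shift loop above.
#   first=0b0101, second=0b0011 -> carry=0b0001, first=0b0110, second=0b0010
#   first=0b0110, second=0b0010 -> carry=0b0010, first=0b0100, second=0b0100
#   first=0b0100, second=0b0100 -> carry=0b0100, first=0b0000, second=0b1000
#   first=0b0000, second=0b1000 -> carry=0b0000, first=0b1000, second=0b0000
#   loop exits with first=0b1000 == 8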
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Dict = int(input("Enter the first number: ").strip())
_lowercase : List[str] = int(input("Enter the second number: ").strip())
print(f"""{add(first, second) = }""")
| 714
|
'''simple docstring'''
_lowercase : int = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def lowerCamelCase ( ) -> None:
lowercase_ : List[Any] = input("""Enter message: """ )
lowercase_ : str = input("""Enter key [alphanumeric]: """ )
lowercase_ : List[Any] = input("""Encrypt/Decrypt [e/d]: """ )
if mode.lower().startswith("""e""" ):
lowercase_ : List[str] = """encrypt"""
lowercase_ : Optional[int] = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith("""d""" ):
lowercase_ : Any = """decrypt"""
lowercase_ : Optional[Any] = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(F'''\n{mode.title()}ed message:''' )
print(UpperCAmelCase__ )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """encrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , """decrypt""" )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : str ) -> str:
lowercase_ : Union[str, Any] = []
lowercase_ : List[Any] = 0
lowercase_ : str = key.upper()
for symbol in message:
lowercase_ : Tuple = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
            num %= len(LETTERS )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
lowercase_ : Any = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 30
| 0
|