code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase_ ( lowercase_ ):
"""simple docstring"""
a_ :List[Any] =["""image_processor""", """tokenizer"""]
a_ :List[Any] ="""CLIPImageProcessor"""
a_ :Optional[int] =("""CLIPTokenizer""", """CLIPTokenizerFast""")
def __init__( self : int , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Dict=None , **SCREAMING_SNAKE_CASE__ : List[Any] ):
'''simple docstring'''
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , _lowercase , )
__a = kwargs.pop("""feature_extractor""" )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_lowercase , _lowercase )
def __call__( self : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Tuple=None , **SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
__a = self.tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if images is not None:
__a = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None and images is not None:
__a = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowercase ) , tensor_type=_lowercase )
def __a ( self : Any , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Optional[int] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def __a ( self : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*_lowercase , **_lowercase )
@property
def __a ( self : str ):
'''simple docstring'''
__a = self.tokenizer.model_input_names
__a = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __a ( self : Optional[Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _lowercase , )
return self.image_processor_class
@property
def __a ( self : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , _lowercase , )
return self.image_processor
| 582 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
UpperCAmelCase_ = 2_048
UpperCAmelCase_ = 4_096
UpperCAmelCase_ = 42
UpperCAmelCase_ = os.environ.pop("""PROCESS_TRAIN""", """false""")
UpperCAmelCase_ = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def __magic_name__ ( lowercase ) -> int:
"""simple docstring"""
def choose_first(lowercase , lowercase=False ):
assert isinstance(lowercase , lowercase )
if len(lowercase ) == 1:
lowercase_ : Any = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowercase_ : Any = {k: [a[k]] for k in a}
if len(a["""start_token"""] ) > 0:
break
return a
lowercase_ : List[str] = {"""id""": example["""id"""]}
lowercase_ : Any = example["""annotations"""]
lowercase_ : Any = annotation["""yes_no_answer"""]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowercase_ : Dict = ["""yes"""] if 1 in yes_no_answer else ["""no"""]
lowercase_ : Dict = []
lowercase_ : Any = []
lowercase_ : Union[str, Any] = ["""<cls>"""]
else:
lowercase_ : Any = ["""short"""]
lowercase_ : Optional[int] = choose_first(annotation["""short_answers"""] )
if len(out["""start_token"""] ) == 0:
# answer will be long if short is not available
lowercase_ : Any = ["""long"""]
lowercase_ : str = choose_first(annotation["""long_answer"""] , is_long_answer=lowercase )
lowercase_ : List[Any] = []
answer.update(lowercase )
# disregard some samples
if len(answer["""start_token"""] ) > 1 or answer["start_token"] == answer["end_token"]:
lowercase_ : Any = True
else:
lowercase_ : List[Any] = False
lowercase_ : List[Any] = ["""start_token""", """end_token""", """start_byte""", """end_byte""", """text"""]
if not all(isinstance(answer[k] , lowercase ) for k in cols ):
raise ValueError("""Issue in ID""" , example["""id"""] )
return answer
def __magic_name__ ( lowercase , lowercase=False ) -> Optional[int]:
"""simple docstring"""
lowercase_ : List[str] = _get_single_answer(lowercase )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase_ : Union[str, Any] = example["""document"""]["""tokens"""]
lowercase_ : List[str] = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
return {
"context": " ".join(lowercase ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase_ : str = ["""start_token""", """end_token"""]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowercase_ : Optional[Any] = example["""document"""]["""tokens"""]
lowercase_ : Any = answer["""start_token"""]
lowercase_ : Dict = answer["""end_token"""]
lowercase_ : Any = []
for i in range(len(doc["""token"""] ) ):
if not doc["is_html"][i]:
context.append(doc["""token"""][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase_ : Union[str, Any] = """ """.join(context[start_token:end_token] )
# checking above code
if assertion:
lowercase_ : Tuple = doc["""is_html"""][answer["""start_token"""] : answer["""end_token"""]]
lowercase_ : int = doc["""token"""][answer["""start_token"""] : answer["""end_token"""]]
lowercase_ : Union[str, Any] = """ """.join([old[i] for i in range(len(lowercase ) ) if not is_html[i]] )
if new != old:
print("""ID:""" , example["""id"""] )
print("""New:""" , lowercase , end="""\n""" )
print("""Old:""" , lowercase , end="""\n\n""" )
return {
"context": " ".join(lowercase ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __magic_name__ ( lowercase , lowercase , lowercase=2048 , lowercase=4096 , lowercase=True ) -> Optional[Any]:
"""simple docstring"""
lowercase_ : int = get_context_and_ans(lowercase , assertion=lowercase )
lowercase_ : Optional[Any] = out["""answer"""]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase_ : List[str] = tokenizer(example["""question"""]["""text"""] , out["""context"""] ).input_ids
lowercase_ : List[Any] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase_ : Tuple = []
lowercase_ : Any = []
lowercase_ : List[Any] = input_ids[:q_len]
lowercase_ : List[str] = range(lowercase , len(lowercase ) , max_length - doc_stride )
for i in doc_start_indices:
lowercase_ : str = i + max_length - q_len
lowercase_ : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["""category"""][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowercase ),
"end_token": [-100] * len(lowercase ),
"category": category,
},
}
lowercase_ : Any = out["""context"""].split()
lowercase_ : Dict = splitted_context[answer["""end_token"""]]
lowercase_ : Optional[Any] = len(
tokenizer(
""" """.join(splitted_context[: answer["""start_token"""]] ) , add_special_tokens=lowercase , ).input_ids )
lowercase_ : Dict = len(
tokenizer(""" """.join(splitted_context[: answer["""end_token"""]] ) , add_special_tokens=lowercase ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase_ : Any = len(tokenizer(lowercase , add_special_tokens=lowercase ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase_ : List[Any] = input_ids[answer["""start_token"""] : answer["""end_token"""] + 1] # right & left are inclusive
lowercase_ : Union[str, Any] = answer["""start_token"""]
lowercase_ : List[str] = answer["""end_token"""]
if assertion:
lowercase_ : Union[str, Any] = tokenizer.decode(lowercase )
if answer["span"] != new:
print("""ISSUE IN TOKENIZATION""" )
print("""OLD:""" , answer["""span"""] )
print("""NEW:""" , lowercase , end="""\n\n""" )
if len(lowercase ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase_ : Optional[Any] = input_ids[:q_len]
lowercase_ : Optional[int] = range(lowercase , len(lowercase ) , max_length - doc_stride )
lowercase_ : Optional[int] = []
lowercase_ : Optional[Any] = []
lowercase_ : List[str] = []
lowercase_ : List[Any] = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase_ : List[str] = i + max_length - q_len
lowercase_ : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase_ : int = start_token - i + q_len
lowercase_ : Dict = end_token - i + q_len
answers_category.append(answer["""category"""][0] ) # ["short"] -> "short"
else:
lowercase_ : Union[str, Any] = -100
lowercase_ : str = -100
answers_category.append("""null""" )
lowercase_ : Optional[int] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowercase )
answers_end_token.append(lowercase )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("""ISSUE in strided for ID:""" , example["""id"""] )
print("""New:""" , tokenizer.decode(lowercase ) )
print("""Old:""" , tokenizer.decode(lowercase ) , end="""\n\n""" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __magic_name__ ( lowercase , lowercase , lowercase=2048 , lowercase=4096 , lowercase=False ) -> Optional[int]:
"""simple docstring"""
lowercase_ : int = get_strided_contexts_and_ans(
lowercase , lowercase , doc_stride=lowercase , max_length=lowercase , assertion=lowercase , )
return example
def __magic_name__ ( lowercase , lowercase ) -> List[Any]:
"""simple docstring"""
with jsonlines.open(lowercase , """a""" ) as writer:
for example in tqdm(lowercase , total=len(lowercase ) , desc="""Saving samples ... """ ):
lowercase_ : List[str] = example["""labels"""]
for ids, start, end, cat in zip(
example["""input_ids"""] , labels["""start_token"""] , labels["""end_token"""] , labels["""category"""] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"""input_ids""": ids,
"""start_token""": start,
"""end_token""": end,
"""category""": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
UpperCAmelCase_ = load_dataset("""natural_questions""")
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
UpperCAmelCase_ = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
UpperCAmelCase_ = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
UpperCAmelCase_ = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
UpperCAmelCase_ = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
UpperCAmelCase_ = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name) | 458 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( lowercase_ ):
def __init__( self :Dict , _lowercase :TransformeraDModel , _lowercase :AutoencoderKL , _lowercase :KarrasDiffusionSchedulers , _lowercase :Optional[Dict[int, str]] = None , ):
'''simple docstring'''
super().__init__()
self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
# create a imagenet -> id dictionary for easier use
lowercase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
lowercase__ = int(_lowercase )
lowercase__ = dict(sorted(self.labels.items() ) )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Union[str, List[str]] ):
'''simple docstring'''
if not isinstance(_lowercase , _lowercase ):
lowercase__ = list(_lowercase )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Optional[Any] , _lowercase :List[int] , _lowercase :float = 4.0 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :int = 50 , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = len(_lowercase )
lowercase__ = self.transformer.config.sample_size
lowercase__ = self.transformer.config.in_channels
lowercase__ = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
lowercase__ = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
lowercase__ = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
lowercase__ = torch.tensor([10_00] * batch_size , device=self.device )
lowercase__ = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(_lowercase )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
lowercase__ = latent_model_input[: len(_lowercase ) // 2]
lowercase__ = torch.cat([half, half] , dim=0 )
lowercase__ = self.scheduler.scale_model_input(_lowercase , _lowercase )
lowercase__ = t
if not torch.is_tensor(_lowercase ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
lowercase__ = latent_model_input.device.type == "mps"
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.floataa if is_mps else torch.floataa
else:
lowercase__ = torch.intaa if is_mps else torch.intaa
lowercase__ = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
lowercase__ = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase__ = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
lowercase__ = self.transformer(
_lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
# perform guidance
if guidance_scale > 1:
lowercase__ , lowercase__ = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
lowercase__ , lowercase__ = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
lowercase__ = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
lowercase__ = torch.cat([half_eps, half_eps] , dim=0 )
lowercase__ = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
lowercase__ , lowercase__ = torch.split(_lowercase , _lowercase , dim=1 )
else:
lowercase__ = noise_pred
# compute previous image: x_t -> x_t-1
lowercase__ = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
if guidance_scale > 1:
lowercase__ , lowercase__ = latent_model_input.chunk(2 , dim=0 )
else:
lowercase__ = latent_model_input
lowercase__ = 1 / self.vae.config.scaling_factor * latents
lowercase__ = self.vae.decode(_lowercase ).sample
lowercase__ = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase__ = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def UpperCamelCase_ ( A__ : Union[str, Any]="" ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = tempfile.mkdtemp()
return os.path.join(A__ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Optional[int] ) -> Any:
lowerCAmelCase_ : str = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase_ : int = AgentAudio(_lowercase )
lowerCAmelCase_ : Union[str, Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_lowercase ) )
# Ensure that the file contains the same value as the original tensor
lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = sf.read(_lowercase )
self.assertTrue(torch.allclose(_lowercase , torch.tensor(_lowercase ) , atol=1E-4 ) )
def __lowercase ( self : Any ) -> str:
lowerCAmelCase_ : Dict = torch.rand(12 , dtype=torch.floataa ) - 0.5
lowerCAmelCase_ : Any = get_new_path(suffix=""".wav""" )
sf.write(_lowercase , _lowercase , 1_60_00 )
lowerCAmelCase_ : Dict = AgentAudio(_lowercase )
self.assertTrue(torch.allclose(_lowercase , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , _lowercase )
@require_vision
@require_torch
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : Any ) -> int:
lowerCAmelCase_ : Optional[Any] = torch.randint(0 , 2_56 , (64, 64, 3) )
lowerCAmelCase_ : Dict = AgentImage(_lowercase )
lowerCAmelCase_ : Optional[int] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_lowercase , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def __lowercase ( self : Dict ) -> Dict:
lowerCAmelCase_ : Optional[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
lowerCAmelCase_ : int = Image.open(_lowercase )
lowerCAmelCase_ : str = AgentImage(_lowercase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
def __lowercase ( self : str ) -> Union[str, Any]:
lowerCAmelCase_ : List[Any] = Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png"""
lowerCAmelCase_ : str = Image.open(_lowercase )
lowerCAmelCase_ : int = AgentImage(_lowercase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_lowercase ) )
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : str ) -> List[Any]:
lowerCAmelCase_ : Dict = """Hey!"""
lowerCAmelCase_ : int = AgentText(_lowercase )
self.assertEqual(_lowercase , agent_type.to_string() )
self.assertEqual(_lowercase , agent_type.to_raw() )
self.assertEqual(_lowercase , _lowercase )
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase ( lowercase_ ):
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
lowercase__ = SMALL_MODEL_IDENTIFIER
lowercase__ = "pt"
lowercase__ = "tf"
def UpperCAmelCase ( self :int , _lowercase :Optional[int] ):
'''simple docstring'''
lowercase__ = AutoModel.from_pretrained(self.test_model )
model_pt.save_pretrained(_lowercase )
def UpperCAmelCase ( self :Tuple , _lowercase :int ):
'''simple docstring'''
lowercase__ = TFAutoModel.from_pretrained(self.test_model , from_pt=_lowercase )
model_tf.save_pretrained(_lowercase )
def UpperCAmelCase ( self :List[Any] ):
'''simple docstring'''
lowercase__ = "mock_framework"
# Framework provided - return whatever the user provides
lowercase__ = FeaturesManager.determine_framework(self.test_model , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# Local checkpoint and framework provided - return provided framework
# PyTorch checkpoint
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase , _lowercase )
self.assertEqual(_lowercase , _lowercase )
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
with TemporaryDirectory() as local_pt_ckpt:
self._setup_pt_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_pt )
# TensorFlow checkpoint
with TemporaryDirectory() as local_tf_ckpt:
self._setup_tf_ckpt(_lowercase )
lowercase__ = FeaturesManager.determine_framework(_lowercase )
self.assertEqual(_lowercase , self.framework_tf )
# Invalid local checkpoint
with TemporaryDirectory() as local_invalid_ckpt:
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(_lowercase )
def UpperCAmelCase ( self :Any ):
'''simple docstring'''
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# PyTorch not in environment -> use TensorFlow
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_tf )
# Both in environment -> use PyTorch
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
self.assertEqual(_lowercase , self.framework_pt )
# Both not in environment -> raise error
lowercase__ = MagicMock(return_value=_lowercase )
lowercase__ = MagicMock(return_value=_lowercase )
with patch("transformers.onnx.features.is_tf_available" , _lowercase ), patch(
"transformers.onnx.features.is_torch_available" , _lowercase ):
with self.assertRaises(_lowercase ):
lowercase__ = FeaturesManager.determine_framework(self.test_model )
| 655 | 0 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
_UpperCamelCase = logging.get_logger(__name__)
def a_ ( _lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase=None ,_lowerCAmelCase=None ) -> int:
# Recurse if needed
if "." in tensor_name:
__lowerCamelCase : Optional[int] = tensor_name.split('.' )
for split in splits[:-1]:
__lowerCamelCase : List[Any] = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.' )
__lowerCamelCase : Optional[int] = new_module
__lowerCamelCase : List[str] = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.' )
__lowerCamelCase : Dict = tensor_name in module._buffers
__lowerCamelCase : int = getattr(_lowerCAmelCase ,_lowerCAmelCase )
if old_value.device == torch.device('meta' ) and device not in ["meta", torch.device('meta' )] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.' )
__lowerCamelCase : Tuple = False
__lowerCamelCase : str = False
if is_buffer or not is_bitsandbytes_available():
__lowerCamelCase : str = False
__lowerCamelCase : int = False
else:
__lowerCamelCase : Optional[Any] = hasattr(bnb.nn ,'Params4bit' ) and isinstance(module._parameters[tensor_name] ,bnb.nn.Paramsabit )
__lowerCamelCase : Any = isinstance(module._parameters[tensor_name] ,bnb.nn.IntaParams )
if is_abit or is_abit:
__lowerCamelCase : List[str] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
__lowerCamelCase : Optional[int] = old_value.to(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,torch.Tensor ):
__lowerCamelCase : Any = value.to('cpu' )
if value.dtype == torch.inta:
__lowerCamelCase : str = version.parse(importlib.metadata.version('bitsandbytes' ) ) > version.parse(
'0.37.2' )
if not is_abit_serializable:
raise ValueError(
'Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. '
'Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.' )
else:
__lowerCamelCase : Dict = torch.tensor(_lowerCAmelCase ,device='cpu' )
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls ,_lowerCAmelCase ) and fpaa_statistics is None:
__lowerCamelCase : Tuple = new_value.T
__lowerCamelCase : Any = old_value.__dict__
if is_abit:
__lowerCamelCase : Optional[int] = bnb.nn.IntaParams(_lowerCAmelCase ,requires_grad=_lowerCAmelCase ,**_lowerCAmelCase ).to(_lowerCAmelCase )
elif is_abit:
__lowerCamelCase : List[str] = bnb.nn.Paramsabit(_lowerCAmelCase ,requires_grad=_lowerCAmelCase ,**_lowerCAmelCase ).to(_lowerCAmelCase )
__lowerCamelCase : List[str] = new_value
if fpaa_statistics is not None:
setattr(module.weight ,'SCB' ,fpaa_statistics.to(_lowerCAmelCase ) )
else:
if value is None:
__lowerCamelCase : str = old_value.to(_lowerCAmelCase )
elif isinstance(_lowerCAmelCase ,torch.Tensor ):
__lowerCamelCase : str = value.to(_lowerCAmelCase )
else:
__lowerCamelCase : Any = torch.tensor(_lowerCAmelCase ,device=_lowerCAmelCase )
if is_buffer:
__lowerCamelCase : Optional[Any] = new_value
else:
__lowerCamelCase : str = nn.Parameter(_lowerCAmelCase ,requires_grad=old_value.requires_grad )
__lowerCamelCase : str = new_value
def _replace_with_bnb_linear(
    model, modules_to_not_convert=None, current_key_name=None, quantization_config=None, has_been_replaced=False
):
    """Recursively replace `nn.Linear`/`Conv1D` children of `model` with bitsandbytes layers.

    NOTE(review): the obfuscated original declared every parameter with the same
    name (a SyntaxError) and referenced undefined locals (`current_key_name`,
    `has_been_replaced`); names and the mangled bitsandbytes identifiers
    (`LinearabitLt` -> `Linear8bitLt`, `llm_inta_*` -> `llm_int8_*`, ...) are restored.

    Args:
        model: the (sub)module whose direct children are inspected and recursed into.
        modules_to_not_convert: module names that must stay in full precision.
        current_key_name: dotted-path accumulator used during recursion.
        quantization_config: object exposing `quantization_method()` and the bnb options.
        has_been_replaced: accumulator threaded through the recursion.

    Returns:
        (model, has_been_replaced) — the model is modified in place.
    """
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)

        if (isinstance(module, nn.Linear) or isinstance(module, Conv1D)) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            if not any(key in ".".join(current_key_name) for key in modules_to_not_convert):
                with init_empty_weights():
                    # Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2):
                    # its weight is stored transposed, so read the shape directly.
                    if isinstance(module, Conv1D):
                        in_features, out_features = module.weight.shape
                    else:
                        in_features = module.in_features
                        out_features = module.out_features

                    if quantization_config.quantization_method() == "llm_int8":
                        model._modules[name] = bnb.nn.Linear8bitLt(
                            in_features,
                            out_features,
                            module.bias is not None,
                            has_fp16_weights=quantization_config.llm_int8_has_fp16_weight,
                            threshold=quantization_config.llm_int8_threshold,
                        )
                        has_been_replaced = True
                    else:
                        if (
                            quantization_config.llm_int8_skip_modules is not None
                            and name in quantization_config.llm_int8_skip_modules
                        ):
                            pass
                        else:
                            model._modules[name] = bnb.nn.Linear4bit(
                                in_features,
                                out_features,
                                module.bias is not None,
                                quantization_config.bnb_4bit_compute_dtype,
                                compress_statistics=quantization_config.bnb_4bit_use_double_quant,
                                quant_type=quantization_config.bnb_4bit_quant_type,
                            )
                            has_been_replaced = True
                    # Store the module class in case we need to transpose the weight later
                    model._modules[name].source_cls = type(module)
                    # Force requires grad to False to avoid unexpected errors
                    model._modules[name].requires_grad_(False)
        if len(list(module.children())) > 0:
            _, has_been_replaced = _replace_with_bnb_linear(
                module,
                modules_to_not_convert,
                current_key_name,
                quantization_config,
                has_been_replaced=has_been_replaced,
            )
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def replace_with_bnb_linear(model, modules_to_not_convert=None, current_key_name=None, quantization_config=None):
    """Public entry point: quantize `model`'s linear layers with bitsandbytes.

    By default keeps `lm_head` in full precision (needed for numerically stable
    generation heads); warns when no linear module was converted at all.
    NOTE(review): the original declared four parameters with one name (SyntaxError);
    names restored from the recursive helper's contract.
    """
    modules_to_not_convert = ["lm_head"] if modules_to_not_convert is None else modules_to_not_convert
    model, has_been_replaced = _replace_with_bnb_linear(
        model, modules_to_not_convert, current_key_name, quantization_config
    )

    if not has_been_replaced:
        logger.warning(
            "You are loading your model in 8bit or 4bit but no linear modules were found in your model."
            " Please double check your model architecture, or submit an issue on github if you think this is"
            " a bug."
        )

    return model
def replace_8bit_linear(*args, **kwargs):
    """Deprecated alias for `replace_with_bnb_linear` (kept for backward compatibility)."""
    warnings.warn(
        "`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead",
        FutureWarning,  # restored: the obfuscated original passed an undefined placeholder as the category
    )
    return replace_with_bnb_linear(*args, **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    """Deprecated alias for `set_module_quantized_tensor_to_device` (kept for backward compatibility)."""
    warnings.warn(
        "`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead",
        FutureWarning,  # restored: the obfuscated original passed an undefined placeholder as the category
    )
    return set_module_quantized_tensor_to_device(*args, **kwargs)
def get_keys_to_not_convert(model) -> list:
    """Return module names that should NOT be quantized (tied weights + output head).

    NOTE(review): the obfuscation collapsed distinct locals into one placeholder
    (e.g. `set(x) - set(x)`, `isinstance(x, x)`); variable names are restored from
    the surrounding logic and the inline comments, which survived intact.
    """
    # this has 0 cost since it is done inside `init_empty_weights` context manager
    tied_model = deepcopy(model)
    tied_model.tie_weights()

    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0

    # Check if it is a base model
    is_base_model = not hasattr(model, model.base_model_prefix)

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)

    # remove ".weight" / ".bias" suffixes so the names match module paths
    names_to_remove = [".weight", ".bias"]
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, "")
        filtered_module_names.append(name)

    return filtered_module_names
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger — the obfuscated original bound this to `_snake_case`, leaving the
# `logger.warning(...)` / `logger.info(...)` calls in the config classes broken.
logger = logging.get_logger(__name__)

# Checkpoint -> config-URL map for pretrained GIT models.
# NOTE(review): original constant name was lost to obfuscation; restored to the
# conventional transformers name — confirm against the upstream module.
GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class GitVisionConfig(PretrainedConfig):
    """Configuration for the vision encoder of a GIT model.

    NOTE(review): restored name — the sibling GIT config in this file instantiates
    `GitVisionConfig(...)`, which was otherwise undefined, and the original
    obfuscated class name was immediately shadowed by the next class anyway.
    The base class is `PretrainedConfig`, imported at the top of this file.
    """

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        """Store the vision-tower hyper-parameters; unknown kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load this config, unwrapping the nested `vision_config` when given a full GIT config."""
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class lowerCAmelCase(PretrainedConfig):
    """Configuration for a GIT (GenerativeImage2Text) model: a vision encoder plus text decoder.

    NOTE(review): this is the GIT config (`model_type = "git"`); the obfuscated
    class name is kept to preserve the external interface, but the duplicated
    `_lowercase` parameter names (a SyntaxError) are restored from the assignment
    order in the original body.
    """

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return all knight moves from `position` that land on an n x n board.

    NOTE(review): restored name — the recursive helper below calls
    `get_valid_pos(...)`, and the obfuscated original declared both parameters
    with the same name (a SyntaxError).
    """
    y, x = position
    # the eight L-shaped knight moves
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]

    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions
def is_complete(board: list[list[int]]) -> bool:
    """Return True when every cell of the board has been visited (no zeros left).

    NOTE(review): restored name — the tour helper below calls `is_complete(...)`.
    """
    return not any(elem == 0 for row in board for elem in row)
def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Depth-first backtracking step of the open knight's tour.

    Tries every legal knight move from `pos`; marks a visited square with the
    move number `curr + 1` and undoes the mark when the branch fails.
    NOTE(review): restored name and the three distinct parameters (the
    obfuscated original declared all three with one name — a SyntaxError).
    """
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:  # square not yet visited
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack

    return False
def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting square.

    Returns the board with squares numbered 1..n*n in visiting order.
    Raises ValueError when no tour exists for this board size.
    """
    board = [[0 for _ in range(n)] for _ in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1  # start the tour here
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0  # no tour from this start; reset

    # "Kight" typo in the original message fixed.
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
    # Run this module's doctests when executed directly.
    import doctest

    doctest.testmod()
| 88 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
    """Cross-framework loading tests: TF auto-models from PT weights and vice versa.

    NOTE(review): in the obfuscated original all ten methods were named
    `UpperCAmelCase` — so only the last one survived class creation and the
    suite silently ran a single test — and every argument was the undefined
    placeholder `_lowercase`. Method names, loop variables and the expected
    `assertIsInstance` targets are restored from this file's import list;
    confirm against the upstream test module.
    """

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 1_44_10)
        self.assertEqual(model.num_parameters(only_trainable=True), 1_44_10)
| 655 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class _lowerCAmelCase(PipelineTool):
    """Donut-based document question answering tool.

    NOTE(review): the obfuscation collapsed every class attribute to `a` and all
    three methods to `_a` (with duplicate parameter names — a SyntaxError).
    Attribute and method names are restored to the `PipelineTool` contract
    (`encode`/`forward`/`decode`); the class name is kept to preserve the
    external interface. The base class is `PipelineTool`, imported above.
    """

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document, question):
        """Build decoder prompt ids and pixel values for one (document, question) pair."""
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        """Run constrained greedy generation on the Donut decoder."""
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        """Strip special tokens, parse the Donut JSON output and return the answer text."""
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        # `tokenajson` in the obfuscated source is the Donut processor's `token2json`.
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger — the obfuscated original bound this to `_snake_case`, leaving the
# later `logger.info(...)` calls in the conversion function broken.
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the obfuscated original assigned this list to `_snake_case`, so every
# `rename_keys.append(...)` below (and the consumer in the conversion function)
# referenced an undefined name — restored to `rename_keys`.
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move `state_dict[old]` to `state_dict[new]` in place.

    NOTE(review): restored name — the conversion function calls `rename_key(...)` —
    and the three distinct parameters (the original declared all three with one
    name, a SyntaxError, and dropped the subscripted assignment target).
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with torchvision backbone keys renamed for HF DETR.

    Keys containing `backbone.0.body` are moved under
    `backbone.conv_encoder.model`; everything else is copied unchanged.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused attention `in_proj` matrix into separate q/k/v projections, in place.

    The original checkpoint stores query, key and value stacked in one
    `in_proj_weight`/`in_proj_bias` per attention layer (hidden size 256, so
    three 256-row slices); HF DETR-style modules expect separate
    `q_proj` / `k_proj` / `v_proj` parameters.
    NOTE(review): the obfuscation dropped the subscripted assignment targets;
    the destination key names are restored from the DETR conversion convention —
    confirm against the upstream conversion script.
    """
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]

    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side is 800 (detection) or 1000 (structure) px.

    NOTE(review): restored the two distinct parameters — the original declared
    both with the same name (a SyntaxError).
    """
    width, height = image.size
    current_max_size = max(width, height)
    # detection checkpoints were trained at max side 800, structure ones at 1000
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and apply ImageNet mean/std normalization.

    Uses `torchvision.transforms.functional` (imported as `F` at the top of the file).
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format and verify it.

    NOTE(review): restored name (the `__main__` block calls it) and the three
    distinct parameters — the obfuscated original declared all three with one
    name (a SyntaxError) and dropped most assignment targets; locals are
    reconstructed from the surviving structure and inline comments.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on a sample document
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7_897, -16.9_985, 6.7_937], [-8.0_186, -22.2_192, 6.9_677], [-7.3_117, -21.0_708, 7.4_055]]
        )
        expected_boxes = torch.tensor([[0.4_867, 0.1_767, 0.6_732], [0.6_718, 0.4_479, 0.3_830], [0.4_716, 0.1_760, 0.6_364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1_430, -8.3_214, 4.8_274], [-18.4_685, -7.1_361, -4.2_667], [-26.3_693, -9.3_429, -4.9_962]]
        )
        expected_boxes = torch.tensor([[0.4_983, 0.5_595, 0.9_440], [0.4_916, 0.6_315, 0.5_954], [0.6_108, 0.8_637, 0.1_135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # NOTE(review): the obfuscated original bound both the parser and the parsed
    # args to `_snake_case`, leaving `parser.add_argument(...)` and the final
    # conversion call referencing undefined names — restored.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[Any] ) -> Tuple:
return 1 if input_a == input_a else 0
def _snake_case ( ) -> str:
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 153 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_snake_case = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
_snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 655 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Any = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class UpperCamelCase ( lowercase_ ):
A__ = """van"""
def __init__( self , snake_case__=224 , snake_case__=3 , snake_case__=[7, 3, 3, 3] , snake_case__=[4, 2, 2, 2] , snake_case__=[64, 128, 320, 512] , snake_case__=[3, 3, 12, 3] , snake_case__=[8, 8, 4, 4] , snake_case__="gelu" , snake_case__=0.02 , snake_case__=1E-6 , snake_case__=1E-2 , snake_case__=0.0 , snake_case__=0.0 , **snake_case__ , ):
"""simple docstring"""
super().__init__(**_lowercase )
_SCREAMING_SNAKE_CASE : str = image_size
_SCREAMING_SNAKE_CASE : Tuple = num_channels
_SCREAMING_SNAKE_CASE : Union[str, Any] = patch_sizes
_SCREAMING_SNAKE_CASE : List[Any] = strides
_SCREAMING_SNAKE_CASE : Any = hidden_sizes
_SCREAMING_SNAKE_CASE : Any = depths
_SCREAMING_SNAKE_CASE : Union[str, Any] = mlp_ratios
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_range
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : str = layer_scale_init_value
_SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate
_SCREAMING_SNAKE_CASE : Dict = dropout_rate
| 572 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def _A ( __magic_name__ , __magic_name__ , __magic_name__=8 ):
lowercase__ = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
lowercase__ = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_lowercase )
def __call__( self :int , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :Union[torch.FloatTensor, List[torch.FloatTensor]] , _lowercase :int = 5_12 , _lowercase :int = 5_12 , _lowercase :int = 1_00 , _lowercase :float = 4.0 , _lowercase :int = 1 , _lowercase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowercase :Optional[torch.FloatTensor] = None , _lowercase :Optional[str] = "pil" , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = self._execution_device
lowercase__ = guidance_scale > 1.0
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
lowercase__ = image_embeds.shape[0] * num_images_per_prompt
if isinstance(_lowercase , _lowercase ):
lowercase__ = torch.cat(_lowercase , dim=0 )
if do_classifier_free_guidance:
lowercase__ = image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = negative_image_embeds.repeat_interleave(_lowercase , dim=0 )
lowercase__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=_lowercase )
self.scheduler.set_timesteps(_lowercase , device=_lowercase )
lowercase__ = self.scheduler.timesteps
lowercase__ = self.unet.config.in_channels
lowercase__ , lowercase__ = downscale_height_and_width(_lowercase , _lowercase , self.movq_scale_factor )
# create initial latent
lowercase__ = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , _lowercase , _lowercase , _lowercase , self.scheduler , )
for i, t in enumerate(self.progress_bar(_lowercase ) ):
# expand the latents if we are doing classifier free guidance
lowercase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowercase__ = {"image_embeds": image_embeds}
lowercase__ = self.unet(
sample=_lowercase , timestep=_lowercase , encoder_hidden_states=_lowercase , added_cond_kwargs=_lowercase , return_dict=_lowercase , )[0]
if do_classifier_free_guidance:
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
lowercase__ , lowercase__ = noise_pred.chunk(2 )
lowercase__ , lowercase__ = variance_pred.chunk(2 )
lowercase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
lowercase__ = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
lowercase__ , lowercase__ = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
lowercase__ = self.scheduler.step(
_lowercase , _lowercase , _lowercase , generator=_lowercase , )[0]
# post-processing
lowercase__ = self.movq.decode(_lowercase , force_not_quantize=_lowercase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
if output_type in ["np", "pil"]:
lowercase__ = image * 0.5 + 0.5
lowercase__ = image.clamp(0 , 1 )
lowercase__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
lowercase__ = self.numpy_to_pil(_lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowercase )
| 655 | 0 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class __UpperCamelCase :
"""simple docstring"""
@staticmethod
def _UpperCAmelCase ( *SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> Optional[int]:
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
_lowercase : Dict = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
a__ = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _UpperCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> str:
a__ = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
import datasets
a__ = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
a__ = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
a__ = object_detector(_lowercase , threshold=0.0 )
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def _UpperCAmelCase ( self ) -> str:
pass
@require_torch
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
a__ = AutoModelForObjectDetection.from_pretrained(_lowercase )
a__ = AutoFeatureExtractor.from_pretrained(_lowercase )
a__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
a__ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
a__ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> int:
a__ = '''facebook/detr-resnet-50'''
a__ = AutoModelForObjectDetection.from_pretrained(_lowercase )
a__ = AutoFeatureExtractor.from_pretrained(_lowercase )
a__ = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
a__ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
a__ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Optional[Any]:
a__ = '''facebook/detr-resnet-50'''
a__ = pipeline('''object-detection''' , model=_lowercase )
a__ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
a__ = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self ) -> Union[str, Any]:
a__ = 0.99_85
a__ = '''facebook/detr-resnet-50'''
a__ = pipeline('''object-detection''' , model=_lowercase )
a__ = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=_lowercase )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
def _UpperCAmelCase ( self ) -> Optional[int]:
a__ = '''Narsil/layoutlmv3-finetuned-funsd'''
a__ = 0.99_93
a__ = pipeline('''object-detection''' , model=_lowercase , threshold=_lowercase )
a__ = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
| 194 |
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
def UpperCAmelCase ( self :int ):
'''simple docstring'''
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCAmelCase ( self :Optional[Any] ):
'''simple docstring'''
import diffusers
from diffusers.dependency_versions_table import deps
lowercase__ = inspect.getmembers(_lowercase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
lowercase__ = "k-diffusion"
elif backend == "invisible_watermark":
lowercase__ = "invisible-watermark"
assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
class snake_case_ ( lowercase_ ):
def __init__( self , *a_ , **a_ ):
warnings.warn(
"The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
" Please use PerceiverImageProcessor instead." , _lowercase , )
super().__init__(*_lowercase , **_lowercase ) | 237 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class lowerCAmelCase :
__lowerCamelCase = 42
# setable values
__lowerCamelCase = 42
__lowerCamelCase = 42
__lowerCamelCase = None
@classmethod
def UpperCAmelCase ( cls :Union[str, Any] , _lowercase :CommonSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray ):
'''simple docstring'''
return cls(common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase )
@dataclass
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = 42
class lowerCAmelCase ( lowercase_ , lowercase_ ):
__lowerCamelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
__lowerCamelCase = 42
@property
def UpperCAmelCase ( self :List[str] ):
'''simple docstring'''
return True
@register_to_config
def __init__( self :str , _lowercase :int = 10_00 , _lowercase :float = 0.0001 , _lowercase :float = 0.02 , _lowercase :str = "linear" , _lowercase :Optional[jnp.ndarray] = None , _lowercase :str = "fixed_small" , _lowercase :bool = True , _lowercase :str = "epsilon" , _lowercase :jnp.dtype = jnp.floataa , ):
'''simple docstring'''
lowercase__ = dtype
def UpperCAmelCase ( self :str , _lowercase :Optional[CommonSchedulerState] = None ):
'''simple docstring'''
if common is None:
lowercase__ = CommonSchedulerState.create(self )
# standard deviation of the initial noise distribution
lowercase__ = jnp.array(1.0 , dtype=self.dtype )
lowercase__ = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
return DDPMSchedulerState.create(
common=_lowercase , init_noise_sigma=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Optional[Any] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :Optional[int] = None ):
'''simple docstring'''
return sample
def UpperCAmelCase ( self :List[str] , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :Tuple = () ):
'''simple docstring'''
lowercase__ = self.config.num_train_timesteps // num_inference_steps
# creates integer timesteps by multiplying by ratio
# rounding to avoid issues when num_inference_step is power of 3
lowercase__ = (jnp.arange(0 , _lowercase ) * step_ratio).round()[::-1]
return state.replace(
num_inference_steps=_lowercase , timesteps=_lowercase , )
def UpperCAmelCase ( self :Tuple , _lowercase :DDPMSchedulerState , _lowercase :int , _lowercase :List[str]=None , _lowercase :Tuple=None ):
'''simple docstring'''
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
lowercase__ = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
if variance_type is None:
lowercase__ = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small":
lowercase__ = jnp.clip(_lowercase , a_min=1e-20 )
# for rl-diffuser https://arxiv.org/abs/2205.09991
elif variance_type == "fixed_small_log":
lowercase__ = jnp.log(jnp.clip(_lowercase , a_min=1e-20 ) )
elif variance_type == "fixed_large":
lowercase__ = state.common.betas[t]
elif variance_type == "fixed_large_log":
# Glide max_log
lowercase__ = jnp.log(state.common.betas[t] )
elif variance_type == "learned":
return predicted_variance
elif variance_type == "learned_range":
lowercase__ = variance
lowercase__ = state.common.betas[t]
lowercase__ = (predicted_variance + 1) / 2
lowercase__ = frac * max_log + (1 - frac) * min_log
return variance
def UpperCAmelCase ( self :Optional[int] , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :int , _lowercase :jnp.ndarray , _lowercase :Optional[jax.random.KeyArray] = None , _lowercase :bool = True , ):
'''simple docstring'''
lowercase__ = timestep
if key is None:
lowercase__ = jax.random.PRNGKey(0 )
if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
lowercase__ , lowercase__ = jnp.split(_lowercase , sample.shape[1] , axis=1 )
else:
lowercase__ = None
# 1. compute alphas, betas
lowercase__ = state.common.alphas_cumprod[t]
lowercase__ = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
lowercase__ = 1 - alpha_prod_t
lowercase__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
lowercase__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
lowercase__ = model_output
elif self.config.prediction_type == "v_prediction":
lowercase__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
" for the FlaxDDPMScheduler." )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
lowercase__ = jnp.clip(_lowercase , -1 , 1 )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
lowercase__ = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowercase__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
def random_variance():
lowercase__ = jax.random.split(_lowercase , num=1 )
lowercase__ = jax.random.normal(_lowercase , shape=model_output.shape , dtype=self.dtype )
return (self._get_variance(_lowercase , _lowercase , predicted_variance=_lowercase ) ** 0.5) * noise
lowercase__ = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
lowercase__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample, state)
return FlaxDDPMSchedulerOutput(prev_sample=_lowercase , state=_lowercase )
def UpperCAmelCase ( self :int , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return add_noise_common(state.common , _lowercase , _lowercase , _lowercase )
def UpperCAmelCase ( self :Dict , _lowercase :DDPMSchedulerState , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , _lowercase :jnp.ndarray , ):
'''simple docstring'''
return get_velocity_common(state.common , _lowercase , _lowercase , _lowercase )
def __len__( self :List[str] ):
'''simple docstring'''
return self.config.num_train_timesteps
| 655 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCamelCase : Optional[int] = logging.get_logger(__name__)
_UpperCamelCase : Optional[int] = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _snake_case ( lowercase_ ):
SCREAMING_SNAKE_CASE : Dict = '''yolos'''
def __init__( self , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1e-12 , _SCREAMING_SNAKE_CASE=[5_12, 8_64] , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.1 , **_SCREAMING_SNAKE_CASE , ):
'''simple docstring'''
super().__init__(**_lowercase )
lowerCAmelCase = hidden_size
lowerCAmelCase = num_hidden_layers
lowerCAmelCase = num_attention_heads
lowerCAmelCase = intermediate_size
lowerCAmelCase = hidden_act
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = initializer_range
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = qkv_bias
lowerCAmelCase = num_detection_tokens
lowerCAmelCase = use_mid_position_embeddings
lowerCAmelCase = auxiliary_loss
# Hungarian matcher
lowerCAmelCase = class_cost
lowerCAmelCase = bbox_cost
lowerCAmelCase = giou_cost
# Loss coefficients
lowerCAmelCase = bbox_loss_coefficient
lowerCAmelCase = giou_loss_coefficient
lowerCAmelCase = eos_coefficient
class _snake_case ( lowercase_ ):
SCREAMING_SNAKE_CASE : List[str] = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
return 12
| 284 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
_snake_case = logging.get_logger(__name__)
_snake_case = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_snake_case = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """Arguments controlling SQuAD data preprocessing.

    Bug fix: every field was declared under the duplicate name
    `__lowerCamelCase` (only the last survived) and several defaults were the
    undefined name `lowercase_`.  Field names, types and defaults are
    restored; the dataset constructor below annotates against this class name.
    """

    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    """Dataset split selector.

    Bug fix: both members were assigned under the duplicate name
    `__lowerCamelCase`, and the class name was a garbled duplicate.  The rest
    of the file uses `Split.train` / `Split.dev` and `Split[mode]` lookups, so
    the enum is restored under that name with string values.
    """

    train = "train"
    dev = "dev"
class SquadDataset(Dataset):
    """PyTorch dataset of SQuAD features with an on-disk feature cache.

    Bug fix: the garbled source bound every intermediate to the throwaway name
    `lowercase__` while later statements read the real attributes
    (`self.features`, `self.dataset`, ...), which were therefore never set.
    Assignment targets are restored from those later uses.
    """

    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): the garbled source instantiated the same processor on
        # both branches; upstream uses the v2 processor when
        # `version_2_with_negative` is set.  The module import only provides
        # `SquadVaProcessor`, so that name is kept — confirm the import line.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i):
        """Convert a cached feature object into a dict of model tensors."""
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
        if self.args.version_2_with_negative:
            inputs.update({"is_impossible": is_impossible})
        if self.is_language_sensitive:
            # `torch.intaa` was a garbled dtype name; int64 matches the long ids.
            inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
        return inputs
| 655 | 0 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """Monocular depth-estimation pipeline.

    Bug fixes: the decorator argument and base class were the undefined
    placeholder `lowercase_`; all four pipeline hooks were defined under the
    same name `__a` (so only the last survived) and bodies read parameters
    that were never bound.  Hook names are restored to the `Pipeline` contract
    (`_sanitize_parameters` / `preprocess` / `_forward` / `postprocess`).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """Predict a depth map for one image (path/URL/PIL) or a batch."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # No pipeline-specific parameters: preprocess/forward/postprocess all empty.
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        # Remember the input size so the prediction can be resized back to it.
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # PIL size is (width, height); interpolate expects (height, width).
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
| 582 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
# Bug fix: the original rebound the single name `_snake_case` for every value,
# so only the last assignment survived and the tokenizer class attributes below
# referenced undefined names.  Constant names are restored from those uses.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

RESOURCE_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}
class ErnieMTokenizer(PreTrainedTokenizer):
    """Sentencepiece-based ErnieM tokenizer.

    Bug fixes: every local/attribute assignment targeted the throwaway name
    `lowercase__` while later statements read the real attribute (e.g.
    `self.vocab`, `self.sp_model`); the class attributes shared one duplicate
    name; and the sort-key lambda in `save_vocabulary` bound the wrong
    parameter name.  Names are restored to the standard tokenizer API.
    """

    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(id): id for id in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        """Map each produced token to its (start, end) character span in `text`."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == "▁":
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        # Drop the unpicklable sentencepiece processor; rebuilt in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Normalize characters through the sentencepiece char mapping."""
        return "".join((self.SP_CHAR_MAPPING.get(c, c) for c in text))

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """Tokenize with sentencepiece, then re-split CJK chars, punctuation and digit runs."""
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                # Keep a bare underline only when the next piece does not carry one.
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] or [CLS] A [SEP] [SEP] B [SEP] (ErnieM uses a double separator)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Bug fix: the sort-key lambda previously bound `_lowercase` but read `kv`.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 655 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single HANS example.

    Bug fix: `frozen=lowercase_` referenced an undefined name and every field
    was the duplicate `__a = 42`; names and types are restored from the
    keyword arguments used where examples are constructed
    (guid / text_a / text_b / label / pairID).
    """

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """Tokenized features for one HANS example.

    Bug fix: field names/types were collapsed to duplicate `__a = 42`
    declarations; restored from the `InputFeatures(**inputs, label=...,
    pairID=...)` construction site.
    """

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class HansDataset(Dataset):
    """PyTorch dataset of HANS features with an on-disk feature cache.

    Bug fix: locals/attributes were bound to the throwaway name `lowercase_`
    while later code read `self.features` / `self.label_list`; assignment
    targets and constructor parameter names are restored.
    """

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = None,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        cached_features_file = os.path.join(
            data_dir,
            "cached_{}_{}_{}_{}".format(
                "dev" if evaluate else "train",
                tokenizer.__class__.__name__,
                str(max_seq_length),
                task,
            ),
        )
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not overwrite_cache:
                logger.info(f"Loading features from cached file {cached_features_file}")
                self.features = torch.load(cached_features_file)
            else:
                logger.info(f"Creating features from dataset file at {data_dir}")
                examples = (
                    processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                )
                logger.info("Training examples: %s", len(examples))
                self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(self.features, cached_features_file)

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
if is_tf_available():
import tensorflow as tf
class TFHansDataset:
    """TensorFlow dataset of HANS features (names restored as in HansDataset)."""

    features: List[InputFeatures]

    def __init__(
        self,
        data_dir: str,
        tokenizer: PreTrainedTokenizer,
        task: str,
        max_seq_length: Optional[int] = 128,
        overwrite_cache=False,
        evaluate: bool = False,
    ):
        processor = hans_processors[task]()
        label_list = processor.get_labels()
        if tokenizer.__class__ in (
            RobertaTokenizer,
            RobertaTokenizerFast,
            XLMRobertaTokenizer,
            BartTokenizer,
            BartTokenizerFast,
        ):
            # HACK(label indices are swapped in RoBERTa pretrained model)
            label_list[1], label_list[2] = label_list[2], label_list[1]
        self.label_list = label_list
        self.examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
        self.features = hans_convert_examples_to_features(self.examples, label_list, max_seq_length, tokenizer)

        def gen():
            for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                if ex_index % 10000 == 0:
                    logger.info("Writing example %d of %d" % (ex_index, len(self.examples)))
                yield (
                    {
                        "example_id": 0,
                        "input_ids": ex.input_ids,
                        "attention_mask": ex.attention_mask,
                        "token_type_ids": ex.token_type_ids,
                    },
                    ex.label,
                )

        # `tf.intaa` was a garbled dtype name: int32 for the token tensors and
        # int64 for the label, matching the upstream HANS utilities --
        # TODO(review) confirm against the original file.
        self.dataset = tf.data.Dataset.from_generator(
            gen,
            (
                {
                    "example_id": tf.int32,
                    "input_ids": tf.int32,
                    "attention_mask": tf.int32,
                    "token_type_ids": tf.int32,
                },
                tf.int64,
            ),
            (
                {
                    "example_id": tf.TensorShape([]),
                    "input_ids": tf.TensorShape([None, None]),
                    "attention_mask": tf.TensorShape([None, None]),
                    "token_type_ids": tf.TensorShape([None, None]),
                },
                tf.TensorShape([]),
            ),
        )

    def get_dataset(self):
        return self.dataset

    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> InputFeatures:
        return self.features[i]

    def get_labels(self):
        return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set (restored method and local names)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """HANS uses two labels, but we keep the standard three MNLI labels."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Build InputExample objects from raw TSV rows (header row skipped)."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            # Column 7 carries the pair id ("exNNN"); column 0 the gold label.
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """
    Convert a list of ``InputExample`` into a list of ``InputFeatures``.

    Bug fix: the function name and all four parameters were the duplicate name
    `lowercase`, so only the last binding survived; names are restored from
    the call sites and body usage.

    Args:
        examples: list of ``InputExample`` objects.
        label_list: labels from ``processor.get_labels()``.
        max_length: maximum sequence length for padding/truncation.
        tokenizer: tokenizer instance used to encode the pairs.

    Returns:
        List of ``InputFeatures`` that can be fed to the model.
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))
        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )
        # Labels outside the map (HANS "non-entailment") fall back to index 0.
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))
    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")
    return features
# Bug fix: both registries were bound to the same name `UpperCAmelCase_`, so the
# first one was clobbered; the dataset constructors look processors up via
# `hans_processors[task]()`, so the conventional names are restored.
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of non-negative `num`.

    Bug fix: the parameter was named `__magic_name__` while the body used
    `num`, the accumulator was never bound to `digit_sum`, and both functions
    in this file shared the name `_A` (only the last survived).
    """
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the `max_n`-th convergent of e.

    Uses the continued fraction e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], where
    every third partial quotient is 2k/3.
    """
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    # Bug fix: the original returned the digit sum of the *argument* rather
    # than of the computed numerator.
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
| 655 | 0 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __snake_case(ModelMixin, ConfigMixin):
    """UnCLIP text-projection module: merges CLIP image/prompt embeddings into
    the decoder's time embedding and extra cross-attention context tokens.

    Bug fixes: both base classes were the undefined placeholder `lowercase_`,
    parameter names were duplicates, attribute assignments used throwaway
    names, and the forward hook was not called `forward` (so
    `nn.Module.__call__` could never dispatch to it).
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
| 275 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_snake_case = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    """Bark processor: wraps a text tokenizer plus optional speaker-embedding
    (voice preset) dictionaries.

    Bug fixes: all three class attributes shared the duplicate name
    `__lowerCamelCase` (only the last survived), locals were bound to the
    throwaway name `lowercase__` while later statements read the real
    attributes, and several keyword defaults were the undefined `_lowercase`.
    Names/defaults are restored from those uses.
    """

    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    # Expected ndarray rank for each voice-preset component.
    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings

    @classmethod
    def from_pretrained(
        cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs
    ):
        """Load the tokenizer and, when available, the speaker-embeddings json."""
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path,
                speaker_embeddings_dict_path,
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if speaker_embeddings_path is None:
                logger.warning(
                    f'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.'''
                )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json)
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)

    def save_pretrained(
        self,
        save_directory,
        speaker_embeddings_dict_path="speaker_embeddings_path.json",
        speaker_embeddings_directory="speaker_embeddings",
        push_to_hub: bool = False,
        **kwargs,
    ):
        """Persist speaker embeddings as .npy files plus an index json, then the tokenizer."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True)
            embeddings_dict = {}
            embeddings_dict["repo_or_path"] = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    voice_preset = self._load_voice_preset(prompt_key)
                    tmp_dict = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}"
                            ),
                            voice_preset[key],
                            allow_pickle=False,
                        )
                        tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy")
                    embeddings_dict[prompt_key] = tmp_dict
            with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp:
                json.dump(embeddings_dict, fp)
        super().save_pretrained(save_directory, push_to_hub, **kwargs)

    def _load_voice_preset(self, voice_preset: str = None, **kwargs):
        """Resolve and load the three .npy arrays for a named voice preset."""
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].'''
                )
            path = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path", "/"),
                voice_preset_paths[key],
                subfolder=kwargs.pop("subfolder", None),
                cache_dir=kwargs.pop("cache_dir", None),
                force_download=kwargs.pop("force_download", False),
                proxies=kwargs.pop("proxies", None),
                resume_download=kwargs.pop("resume_download", False),
                local_files_only=kwargs.pop("local_files_only", False),
                use_auth_token=kwargs.pop("use_auth_token", None),
                revision=kwargs.pop("revision", None),
            )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.'''
                )
            voice_preset_dict[key] = np.load(path)
        return voice_preset_dict

    def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''')
            if not isinstance(voice_preset[key], np.ndarray):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''')
            if len(voice_preset[key].shape) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''')

    def __call__(
        self,
        text=None,
        voice_preset=None,
        return_tensors="pt",
        max_length=256,
        add_special_tokens=False,
        return_attention_mask=True,
        return_token_type_ids=False,
        **kwargs,
    ):
        """Tokenize `text` and optionally attach a validated voice preset."""
        if voice_preset is not None and not isinstance(voice_preset, dict):
            if (
                isinstance(voice_preset, str)
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset)
            else:
                if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"):
                    voice_preset = voice_preset + ".npz"
                voice_preset = np.load(voice_preset)
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset, **kwargs)
            voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors)
        encoded_text = self.tokenizer(
            text,
            return_tensors=return_tensors,
            padding="max_length",
            max_length=max_length,
            return_attention_mask=return_attention_mask,
            return_token_type_ids=return_token_type_ids,
            add_special_tokens=add_special_tokens,
            **kwargs,
        )
        if voice_preset is not None:
            encoded_text["history_prompt"] = voice_preset
        return encoded_text
| 655 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
_UpperCamelCase = False
class lowerCamelCase_ ( unittest.TestCase ):
    """Fast, CPU-only smoke tests for ``VQDiffusionPipeline`` built from tiny
    randomly-seeded dummy components.

    NOTE(review): this block carries mechanical renaming damage — every method
    and property is named ``_lowercase`` (later defs shadow earlier ones), and
    assignment targets were collapsed to ``__lowerCamelCase`` while later reads
    use the original names (``model``, ``tokenizer``, ``pipe``, ``prompt`` ...).
    Attributes referenced below (``self.dummy_vqvae``, ``self.num_embed``,
    ``self.text_embedder_hidden_size``, ``self.num_embeds_ada_norm``) therefore
    never exist. TODO: restore the original identifiers before running.
    """

    def _lowercase ( self : Optional[Any] ) -> str:
        # Free references and CUDA memory between tests.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def _lowercase ( self : List[str] ) -> List[str]:
        # Size of the discrete VQ codebook.
        return 12

    @property
    def _lowercase ( self : Optional[int] ) -> Tuple:
        # Number of AdaLayerNorm timestep embeddings for the transformer.
        return 12

    @property
    def _lowercase ( self : Dict ) -> Optional[Any]:
        # Hidden size of the dummy CLIP text encoder.
        return 32

    @property
    def _lowercase ( self : int ) -> int:
        # Tiny VQ-VAE (seeded for determinism).
        torch.manual_seed(0 )
        __lowerCamelCase : Union[str, Any] = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def _lowercase ( self : int ) -> Tuple:
        # Tiny CLIP tokenizer from HF hub test fixtures.
        __lowerCamelCase : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def _lowercase ( self : int ) -> Dict:
        # Tiny CLIP text encoder (seeded for determinism).
        torch.manual_seed(0 )
        __lowerCamelCase : Any = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        return CLIPTextModel(_lowercase )

    @property
    def _lowercase ( self : str ) -> Union[str, Any]:
        # Tiny 12x12 discrete-token transformer backbone.
        torch.manual_seed(0 )
        __lowerCamelCase : List[Any] = 12
        __lowerCamelCase : Any = 12
        __lowerCamelCase : Any = {
            'attention_bias': True,
            'cross_attention_dim': 32,
            'attention_head_dim': height * width,
            'num_attention_heads': 1,
            'num_vector_embeds': self.num_embed,
            'num_embeds_ada_norm': self.num_embeds_ada_norm,
            'norm_num_groups': 32,
            'sample_size': width,
            'activation_fn': 'geglu-approximate',
        }
        __lowerCamelCase : Any = TransformeraDModel(**_lowercase )
        return model

    def _lowercase ( self : int ) -> Tuple:
        # End-to-end generation with non-learnable classifier-free sampling
        # embeddings; checks shape and a fixed 3x3 pixel slice.
        __lowerCamelCase : Optional[Any] = 'cpu'
        __lowerCamelCase : List[Any] = self.dummy_vqvae
        __lowerCamelCase : Optional[Any] = self.dummy_text_encoder
        __lowerCamelCase : Optional[Any] = self.dummy_tokenizer
        __lowerCamelCase : Optional[Any] = self.dummy_transformer
        __lowerCamelCase : Optional[int] = VQDiffusionScheduler(self.num_embed )
        __lowerCamelCase : Optional[int] = LearnedClassifierFreeSamplingEmbeddings(learnable=_lowercase )
        __lowerCamelCase : str = VQDiffusionPipeline(
            vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
        __lowerCamelCase : int = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __lowerCamelCase : Optional[Any] = 'teddy bear playing in the pool'
        __lowerCamelCase : List[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
        __lowerCamelCase : str = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' )
        __lowerCamelCase : List[Any] = output.images
        # Re-run with the same seed but return_dict=False; results must match.
        __lowerCamelCase : int = torch.Generator(device=_lowercase ).manual_seed(0 )
        __lowerCamelCase : Optional[int] = pipe(
            [prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0]
        __lowerCamelCase : Any = image[0, -3:, -3:, -1]
        __lowerCamelCase : Any = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowerCamelCase : str = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        # Same pipeline but with *learnable* classifier-free sampling embeddings.
        __lowerCamelCase : int = 'cpu'
        __lowerCamelCase : str = self.dummy_vqvae
        __lowerCamelCase : int = self.dummy_text_encoder
        __lowerCamelCase : List[str] = self.dummy_tokenizer
        __lowerCamelCase : Optional[Any] = self.dummy_transformer
        __lowerCamelCase : Optional[int] = VQDiffusionScheduler(self.num_embed )
        __lowerCamelCase : List[str] = LearnedClassifierFreeSamplingEmbeddings(
            learnable=_lowercase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        __lowerCamelCase : int = VQDiffusionPipeline(
            vqvae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , transformer=_lowercase , scheduler=_lowercase , learned_classifier_free_sampling_embeddings=_lowercase , )
        __lowerCamelCase : Dict = pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        __lowerCamelCase : str = 'teddy bear playing in the pool'
        __lowerCamelCase : Dict = torch.Generator(device=_lowercase ).manual_seed(0 )
        __lowerCamelCase : List[Any] = pipe([prompt] , generator=_lowercase , num_inference_steps=2 , output_type='np' )
        __lowerCamelCase : Any = output.images
        __lowerCamelCase : Optional[Any] = torch.Generator(device=_lowercase ).manual_seed(0 )
        __lowerCamelCase : Optional[Any] = pipe(
            [prompt] , generator=_lowercase , output_type='np' , return_dict=_lowercase , num_inference_steps=2 )[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        __lowerCamelCase : Optional[int] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] )
        # NOTE(review): tolerance 2.0 here vs 1e-2 on the next line looks
        # inconsistent — confirm against upstream before tightening.
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration test: runs the released
    'microsoft/vq-diffusion-ithq' checkpoint end-to-end and compares against a
    reference image stored on the HF hub.

    NOTE(review): both methods are named ``_lowercase``, so the second
    definition shadows the first (the teardown), and neither matches the
    ``test_*`` discovery pattern — nothing here runs under unittest as
    written. TODO: restore the original method names.
    """

    def _lowercase ( self : Tuple ) -> str:
        # Free memory after each test.
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowercase ( self : int ) -> List[Any]:
        # Reference output produced with a fixed seed.
        __lowerCamelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy' )
        __lowerCamelCase : Any = VQDiffusionPipeline.from_pretrained('microsoft/vq-diffusion-ithq' )
        __lowerCamelCase : Union[str, Any] = pipeline.to(_lowercase )
        pipeline.set_progress_bar_config(disable=_lowercase )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        __lowerCamelCase : int = torch.Generator(device=_lowercase ).manual_seed(0 )
        __lowerCamelCase : str = pipeline(
            'teddy bear playing in the pool' , num_images_per_prompt=1 , generator=_lowercase , output_type='np' , )
        __lowerCamelCase : Optional[int] = output.images[0]
        assert image.shape == (256, 256, 3)
        # Loose tolerance: gumbel-softmax sampling drifts across devices.
        assert np.abs(expected_image - image ).max() < 2.0
| 459 |
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or the sigmoid derivative when ``deriv`` is True.

    When ``deriv`` is True, ``value`` is assumed to already be a sigmoid
    output ``s`` and ``s * (1 - s)`` is returned.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))
# Initial Value
_snake_case = 0.02
def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by gradient descent toward ``expected``.

    Args:
        expected: target value in [0, 100] (internally scaled to [0, 1]).
        number_propagations: number of forward/backward iterations.

    Returns:
        The final prediction scaled back to [0, 100]; converges toward
        ``expected`` for a sufficiently large iteration count.
    """
    # Random starting weight: an odd value in [-99, 99].
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta: scale the error by the sigmoid gradient at layer_1.
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Interactive demo: train toward a user-supplied target value.
    # NOTE(review): both inputs are bound to the same name `_snake_case`, yet the
    # print call below reads `expected` and `number_propagations`, which are never
    # defined — this branch raises NameError as written. TODO: restore the
    # original variable names.
    _snake_case = int(input("""Expected value: """))
    _snake_case = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
| 655 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class lowercase__ ( lowercase_ ):
    """Configuration for a GPTSAN-japanese model.

    Stores the hyper-parameters of the hybrid Switch-Transformer / extra-layer
    architecture; consumed by the corresponding model class via the usual
    PretrainedConfig protocol.
    """

    # Identifier used by AutoConfig-style dispatch.
    model_type = "gptsan-japanese"
    # Cached state is an internal detail, not part of inference outputs.
    keys_to_ignore_at_inference = [
        "past_key_values",
    ]
    # Map common config attribute names onto this model's native names.
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=36000,
        max_position_embeddings=1280,
        d_model=1024,
        d_ff=8192,
        d_ext=4096,
        d_spout=128,
        num_switch_layers=10,
        num_ext_layers=0,
        num_heads=16,
        num_experts=16,
        expert_capacity=128,
        dropout_rate=0.0,
        layer_norm_epsilon=1e-5,
        router_bias=False,
        router_jitter_noise=0.0,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        output_hidden_states=False,
        output_attentions=False,
        initializer_factor=0.002,
        output_router_logits=False,
        use_cache=True,
        separator_token_id=35998,
        pad_token_id=35995,
        eos_token_id=35999,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        # Total depth is the sum of switch (MoE) and plain extra layers.
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache

        super().__init__(
            separator_token_id=separator_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase ( lowercase_ ):
    """Configuration for a Visual Attention Network (VAN) model (deprecated).

    Defaults reproduce the 'Visual-Attention-Network/van-base' architecture.
    """

    # Identifier used by AutoConfig-style dispatch.
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],  # list defaults kept for upstream compatibility
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 655 | 0 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import table: submodule name -> public symbols. Consumed by _LazyModule
# below so that heavy dependencies (torch) only load on first attribute access.
_import_structure = {
    "configuration_mgp_str": ["MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP", "MgpstrConfig"],
    "processing_mgp_str": ["MgpstrProcessor"],
    "tokenization_mgp_str": ["MgpstrTokenizer"],
}

# Modeling symbols are only registered when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mgp_str"] = [
        "MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MgpstrModel",
        "MgpstrPreTrainedModel",
        "MgpstrForSceneTextRecognition",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only; never executed at runtime.
    from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
    from .processing_mgp_str import MgpstrProcessor
    from .tokenization_mgp_str import MgpstrTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mgp_str import (
            MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
            MgpstrForSceneTextRecognition,
            MgpstrModel,
            MgpstrPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that resolves attributes on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """How the text-generation pipeline should shape its output."""

    TENSORS = 0  # raw generated token ids
    NEW_TEXT = 1  # only the newly generated text
    FULL_TEXT = 2  # prompt + generated text
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
    """Causal-LM text-generation pipeline: tokenize a prompt, call
    ``model.generate``, decode the completion.

    NOTE(review): this block carries heavy mechanical renaming damage. Several
    ``def`` signatures repeat the parameter name ``_lowercase`` (a SyntaxError),
    and many assignment targets were collapsed to ``lowercase__`` where the
    original assigned to distinct variables or dict entries (for instance the
    ``preprocess_params`` / ``forward_params`` / ``postprocess_params`` dicts).
    The code below is kept byte-for-byte; the original identifiers must be
    restored before this class can run.
    """

    # Long article prepended for XLNet/Transfo-XL to give the model context.
    __lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '

    def __init__( self :Any , *_lowercase :Optional[Any] , **_lowercase :Union[str, Any] ):
        '''Wire up the pipeline and compute the default generation prefix.'''
        super().__init__(*_lowercase , **_lowercase )
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            lowercase__ = None
            if self.model.config.prefix is not None:
                lowercase__ = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                lowercase__ = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                lowercase__ , lowercase__ , lowercase__ = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
                lowercase__ = {**self._preprocess_params, **preprocess_params}
                lowercase__ = {**self._forward_params, **forward_params}

    def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
        '''Split user kwargs into preprocess / forward / postprocess parameter dicts.'''
        lowercase__ = {}
        if prefix is not None:
            lowercase__ = prefix
        if prefix:
            # Tokenize the prefix once so generation lengths can account for it.
            lowercase__ = self.tokenizer(
                _lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
            lowercase__ = prefix_inputs["input_ids"].shape[-1]
        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    " [None, 'hole']" )
            lowercase__ = handle_long_generation
        preprocess_params.update(_lowercase )
        lowercase__ = generate_kwargs
        lowercase__ = {}
        # return_full_text / return_text / return_tensors are mutually exclusive
        # ways of selecting the output ReturnType.
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
            lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
            lowercase__ = ReturnType.TENSORS
        if return_type is not None:
            lowercase__ = return_type
        if clean_up_tokenization_spaces is not None:
            lowercase__ = clean_up_tokenization_spaces
        if stop_sequence is not None:
            lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
            if len(_lowercase ) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim." )
            lowercase__ = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params

    def UpperCAmelCase ( self :int , *_lowercase :Optional[int] , **_lowercase :List[str] ):
        '''Legacy tokenization hook; Transfo-XL needs spaces before punctuation.'''
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True} )
        return super()._parse_and_tokenize(*_lowercase , **_lowercase )

    def __call__( self :Union[str, Any] , _lowercase :Dict , **_lowercase :Tuple ):
        '''Generate completions for the given prompt(s); see Pipeline.__call__.'''
        return super().__call__(_lowercase , **_lowercase )

    def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple , _lowercase :Optional[int]="" , _lowercase :Tuple=None , **_lowercase :List[str] ):
        '''Tokenize prefix+prompt; optionally left-truncate ("hole") to fit the model.'''
        lowercase__ = self.tokenizer(
            prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
        lowercase__ = prompt_text
        if handle_long_generation == "hole":
            lowercase__ = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                lowercase__ = generate_kwargs["max_new_tokens"]
            else:
                lowercase__ = generate_kwargs.get("max_length" , self.model.config.max_length ) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected" )
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                lowercase__ = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length" )
                # Keep only the rightmost tokens so prompt + new tokens fit.
                lowercase__ = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    lowercase__ = inputs["attention_mask"][:, -keep_length:]
        return inputs

    def UpperCAmelCase ( self :str , _lowercase :int , **_lowercase :str ):
        '''Call model.generate and reshape output for multiple return sequences.'''
        lowercase__ = model_inputs["input_ids"]
        lowercase__ = model_inputs.get("attention_mask" , _lowercase )
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            lowercase__ = None
            lowercase__ = None
            lowercase__ = 1
        else:
            lowercase__ = input_ids.shape[0]
        lowercase__ = model_inputs.pop("prompt_text" )
        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        lowercase__ = generate_kwargs.pop("prefix_length" , 0 )
        if prefix_length > 0:
            lowercase__ = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                lowercase__ = generate_kwargs.get("max_length" ) or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            lowercase__ = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length
        # BS x SL
        lowercase__ = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
        lowercase__ = generated_sequence.shape[0]
        if self.framework == "pt":
            lowercase__ = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
        elif self.framework == "tf":
            lowercase__ = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def UpperCAmelCase ( self :Any , _lowercase :Tuple , _lowercase :str=ReturnType.FULL_TEXT , _lowercase :Dict=True ):
        '''Decode generated ids to text, stripping or keeping the prompt per return_type.'''
        lowercase__ = model_outputs["generated_sequence"][0]
        lowercase__ = model_outputs["input_ids"]
        lowercase__ = model_outputs["prompt_text"]
        lowercase__ = generated_sequence.numpy().tolist()
        lowercase__ = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                lowercase__ = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                lowercase__ = self.tokenizer.decode(
                    _lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    lowercase__ = 0
                else:
                    lowercase__ = len(
                        self.tokenizer.decode(
                            input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
                if return_type == ReturnType.FULL_TEXT:
                    lowercase__ = prompt_text + text[prompt_length:]
                else:
                    lowercase__ = text[prompt_length:]
                lowercase__ = {"generated_text": all_text}
            records.append(_lowercase )
        return records
| 655 | 0 |
"""simple docstring"""
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class lowercase__ ( lowercase_, lowercase_, unittest.TestCase ):
    """Fast (CPU, dummy-weight) checks for the IF image-to-image
    super-resolution pipeline."""

    pipeline_class = IFImgaImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        # Shared IF mixin helper builds the tiny super-resolution components.
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Loose tolerance: fp16 numerics drift after a save/load round-trip.
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
| 153 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_snake_case = collections.namedtuple("""_Datasets""", ["""train""", """validation""", """test"""])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
_snake_case = """https://storage.googleapis.com/cvdf-datasets/mnist/"""
def _readaa(bytestream):
    """Read 4 bytes from *bytestream* and return them as a big-endian uint32."""
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from an MNIST IDX3 gzip file.

    Args:
        f: file object of a gzipped IDX3 image file.

    Returns:
        uint8 array of shape [num_images, rows, cols, 1].

    Raises:
        ValueError: if the file's magic number is not 2051.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class-index labels to a (num_labels, num_classes) one-hot array."""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Write exactly one 1 per row, at flat position row*num_classes + label.
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from an MNIST IDX1 gzip file.

    Args:
        f: file object of a gzipped IDX1 label file.
        one_hot: if True, return one-hot encoded labels.
        num_classes: number of classes for the one-hot encoding.

    Returns:
        uint8 label vector, or a one-hot matrix when ``one_hot`` is True.

    Raises:
        ValueError: if the file's magic number is not 2049.
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                "Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST split: holds images/labels and serves shuffled mini-batches.

    Deprecated compatibility shim; prefer tf.data for new code.
    """

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet.

        Args:
            images: uint8 array [num, rows, cols, 1] (ignored when fake_data).
            labels: label array aligned with images.
            fake_data: if True, serve constant fake examples.
            one_hot: with fake_data, emit one-hot fake labels.
            dtype: dtypes.uint8 (leave as-is) or dtypes.float32 (rescale into [0, 1]).
            reshape: flatten [rows, cols, 1] into [rows*cols].
            seed: graph-level RNG seed for shuffling.
        """
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples, reshuffling at epoch boundaries."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(__magic_name__ , "Please write your own downloading logic." )
def _A ( __magic_name__ , __magic_name__ , __magic_name__ ):
    # Download a file from a URL into a work directory unless it is already
    # present, then return its local path.
    # NOTE(review): obfuscation damage — the three parameters share the name
    # ``__magic_name__`` (a SyntaxError as written; presumably ``filename``,
    # ``work_directory``, ``source_url``), and ``filepath``/``size`` were
    # rewritten to ``lowercase__``, so the final ``return filepath`` refers
    # to an unbound name. TODO restore from the upstream TF MNIST helper.
    if not gfile.Exists(__magic_name__ ):
        gfile.MakeDirs(__magic_name__ )
    lowercase__ = os.path.join(__magic_name__ , __magic_name__ )
    if not gfile.Exists(__magic_name__ ):
        urllib.request.urlretrieve(__magic_name__ , __magic_name__ )  # noqa: S310
    with gfile.GFile(__magic_name__ ) as f:
        lowercase__ = f.size()
    print("Successfully downloaded" , __magic_name__ , __magic_name__ , "bytes." )
    return filepath
@deprecated(
    __magic_name__ , "Please use alternatives such as:" " tensorflow_datasets.load('mnist')" )
def _A ( __magic_name__ , __magic_name__=False , __magic_name__=False , __magic_name__=dtypes.floataa , __magic_name__=True , __magic_name__=5000 , __magic_name__=None , __magic_name__=DEFAULT_SOURCE_URL , ):
    # Download/extract the four MNIST archives and return train/validation/
    # test ``_DataSet``s bundled in a ``_Datasets`` namedtuple.
    # NOTE(review): obfuscation damage — every parameter shares the name
    # ``__magic_name__`` (a SyntaxError as written; presumably ``train_dir``,
    # ``fake_data``, ``one_hot``, ``dtype``, ``reshape``, ``validation_size``,
    # ``seed``, ``source_url``), and the ``lowercase__`` targets below were
    # the real locals (``train_images``, ``local_file``, ...). TODO restore
    # from the upstream TensorFlow ``read_data_sets``.
    if fake_data:
        def fake():
            # Empty data set that only echoes the fake-data flags.
            return _DataSet(
                [] , [] , fake_data=__magic_name__ , one_hot=__magic_name__ , dtype=__magic_name__ , seed=__magic_name__ )
        lowercase__ = fake()
        lowercase__ = fake()
        lowercase__ = fake()
        return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
    if not source_url:  # empty string check
        lowercase__ = DEFAULT_SOURCE_URL
    lowercase__ = "train-images-idx3-ubyte.gz"
    lowercase__ = "train-labels-idx1-ubyte.gz"
    lowercase__ = "t10k-images-idx3-ubyte.gz"
    lowercase__ = "t10k-labels-idx1-ubyte.gz"
    # Fetch and parse each archive in turn (images then labels, train then test).
    lowercase__ = _maybe_download(
        __magic_name__ , __magic_name__ , source_url + train_images_file )
    with gfile.Open(__magic_name__ , "rb" ) as f:
        lowercase__ = _extract_images(__magic_name__ )
    lowercase__ = _maybe_download(
        __magic_name__ , __magic_name__ , source_url + train_labels_file )
    with gfile.Open(__magic_name__ , "rb" ) as f:
        lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
    lowercase__ = _maybe_download(
        __magic_name__ , __magic_name__ , source_url + test_images_file )
    with gfile.Open(__magic_name__ , "rb" ) as f:
        lowercase__ = _extract_images(__magic_name__ )
    lowercase__ = _maybe_download(
        __magic_name__ , __magic_name__ , source_url + test_labels_file )
    with gfile.Open(__magic_name__ , "rb" ) as f:
        lowercase__ = _extract_labels(__magic_name__ , one_hot=__magic_name__ )
    # Guard against an out-of-range validation split.
    if not 0 <= validation_size <= len(__magic_name__ ):
        lowercase__ = (
            "Validation size should be between 0 and "
            f'''{len(__magic_name__ )}. Received: {validation_size}.'''
        )
        raise ValueError(__magic_name__ )
    # Carve the validation split off the front of the training data.
    lowercase__ = train_images[:validation_size]
    lowercase__ = train_labels[:validation_size]
    lowercase__ = train_images[validation_size:]
    lowercase__ = train_labels[validation_size:]
    lowercase__ = {"dtype": dtype, "reshape": reshape, "seed": seed}
    lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
    lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
    lowercase__ = _DataSet(__magic_name__ , __magic_name__ , **__magic_name__ )
    return _Datasets(train=__magic_name__ , validation=__magic_name__ , test=__magic_name__ )
| 655 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and the pretrained-config archive map for GIT.
# NOTE(review): both bindings share the obfuscated name ``lowercase_`` — the
# dict clobbers the logger, and the classes below inherit from ``lowercase_``
# (presumably ``PretrainedConfig`` originally). TODO restore distinct names.
lowercase_ : List[Any] = logging.get_logger(__name__)
lowercase_ : List[str] = {
    '''microsoft/git-base''': '''https://huggingface.co/microsoft/git-base/resolve/main/config.json''',
}
class UpperCamelCase ( lowercase_ ):
    """Configuration for the GIT vision encoder (``git_vision_model``).

    NOTE(review): obfuscation damage — every ``__init__`` parameter shares
    the name ``snake_case__`` (a SyntaxError as written; presumably
    ``hidden_size``, ``intermediate_size``, ``num_hidden_layers``,
    ``num_attention_heads``, ``num_channels``, ``image_size``,
    ``patch_size``, ``hidden_act``, ``layer_norm_eps``,
    ``attention_dropout``, ``initializer_range``), and the
    ``_SCREAMING_SNAKE_CASE : ... =`` lines were presumably
    ``self.<attr> = <arg>`` — as written they bind a throwaway local and the
    instance stores nothing. TODO restore from transformers'
    ``GitVisionConfig``.
    """
    A__ = """git_vision_model"""
    def __init__( self , snake_case__=768 , snake_case__=3072 , snake_case__=12 , snake_case__=12 , snake_case__=3 , snake_case__=224 , snake_case__=16 , snake_case__="quick_gelu" , snake_case__=1E-5 , snake_case__=0.0 , snake_case__=0.02 , **snake_case__ , ):
        """Store vision-encoder hyperparameters (see class note)."""
        super().__init__(**_lowercase )
        _SCREAMING_SNAKE_CASE : Dict = hidden_size
        _SCREAMING_SNAKE_CASE : int = intermediate_size
        _SCREAMING_SNAKE_CASE : Optional[int] = num_hidden_layers
        _SCREAMING_SNAKE_CASE : List[str] = num_attention_heads
        _SCREAMING_SNAKE_CASE : List[Any] = num_channels
        _SCREAMING_SNAKE_CASE : str = patch_size
        _SCREAMING_SNAKE_CASE : Any = image_size
        _SCREAMING_SNAKE_CASE : List[Any] = initializer_range
        _SCREAMING_SNAKE_CASE : Tuple = attention_dropout
        _SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
        _SCREAMING_SNAKE_CASE : Optional[int] = hidden_act
    @classmethod
    def __SCREAMING_SNAKE_CASE ( cls , snake_case__ , **snake_case__ ):
        """Load this config from a pretrained checkpoint, unwrapping the
        ``vision_config`` sub-dict when the checkpoint is a full ``git``
        config."""
        cls._set_token_in_kwargs(_lowercase )
        _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = cls.get_config_dict(_lowercase , **_lowercase )
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type" ) == "git":
            _SCREAMING_SNAKE_CASE : Tuple = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
                F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(_lowercase , **_lowercase )
class UpperCamelCase ( lowercase_ ):
    """Configuration for the full GIT model (``git``), combining a vision
    config with text-decoder hyperparameters.

    NOTE(review): obfuscation damage — this class shares its name with the
    vision config above (this definition clobbers it), every ``__init__``
    parameter shares the name ``snake_case__`` (SyntaxError as written), the
    ``_SCREAMING_SNAKE_CASE`` bindings were presumably ``self.<attr> = ...``,
    and ``GitVisionConfig`` at the vision-config line is undefined in this
    module. TODO restore from transformers' ``GitConfig``.
    """
    A__ = """git"""
    def __init__( self , snake_case__=None , snake_case__=30522 , snake_case__=768 , snake_case__=6 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=1024 , snake_case__=0.02 , snake_case__=1E-12 , snake_case__=0 , snake_case__="absolute" , snake_case__=True , snake_case__=False , snake_case__=101 , snake_case__=102 , snake_case__=None , **snake_case__ , ):
        """Store decoder hyperparameters and build the nested vision config
        (defaulting it when none is supplied)."""
        super().__init__(bos_token_id=_lowercase , eos_token_id=_lowercase , pad_token_id=_lowercase , **_lowercase )
        if vision_config is None:
            _SCREAMING_SNAKE_CASE : Union[str, Any] = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values." )
        _SCREAMING_SNAKE_CASE : Tuple = GitVisionConfig(**_lowercase )
        _SCREAMING_SNAKE_CASE : List[Any] = vocab_size
        _SCREAMING_SNAKE_CASE : List[Any] = hidden_size
        _SCREAMING_SNAKE_CASE : Union[str, Any] = num_hidden_layers
        _SCREAMING_SNAKE_CASE : str = num_attention_heads
        _SCREAMING_SNAKE_CASE : List[str] = hidden_act
        _SCREAMING_SNAKE_CASE : str = intermediate_size
        _SCREAMING_SNAKE_CASE : Optional[Any] = hidden_dropout_prob
        _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
        _SCREAMING_SNAKE_CASE : str = max_position_embeddings
        _SCREAMING_SNAKE_CASE : Tuple = initializer_range
        _SCREAMING_SNAKE_CASE : List[str] = layer_norm_eps
        _SCREAMING_SNAKE_CASE : Any = position_embedding_type
        _SCREAMING_SNAKE_CASE : Tuple = use_cache
        _SCREAMING_SNAKE_CASE : int = tie_word_embeddings
        _SCREAMING_SNAKE_CASE : Optional[Any] = num_image_with_embedding
        _SCREAMING_SNAKE_CASE : Dict = bos_token_id
        _SCREAMING_SNAKE_CASE : List[str] = eos_token_id
    def __SCREAMING_SNAKE_CASE ( self ):
        """Serialize this config to a plain dict, flattening the nested
        vision config and recording the model type."""
        _SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__ )
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.vision_config.to_dict()
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.__class__.model_type
        return output
| 572 |
from __future__ import annotations
class lowerCAmelCase :
    """Singly linked list node.

    Fixed: the obfuscated original assigned the constructor arguments to a
    throwaway local (``lowercase__``), so ``self.data``/``self.next`` were
    never set and ``__repr__`` crashed on a fresh node.
    """

    def __init__(self, data=None):
        self.data = data  # payload stored in this node
        self.next = None  # link to the following node (None = end of list)

    def __repr__(self):
        """Render the chain starting at this node as ``a->b->c``."""
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)


# Backward-compatible alias: the helper functions below refer to ``Node``,
# which the obfuscation left undefined in this module.
Node = lowerCAmelCase
def _A ( __magic_name__ ):
    """Build a singly linked list from a non-empty sequence and return its head.

    Raises:
        Exception: if the input sequence is empty.

    Fixed: the obfuscated original referenced the undefined name
    ``elements_list`` (the parameter here is ``__magic_name__``) and collapsed
    the ``head``/``current`` assignment targets into ``lowercase__``, so the
    list was never actually linked together.
    """
    if not __magic_name__:
        raise Exception("The Elements List is empty" )
    current = head = Node(__magic_name__[0] )
    for i in range(1 , len(__magic_name__ ) ):
        # Append a node for each remaining element and advance the cursor.
        current.next = Node(__magic_name__[i] )
        current = current.next
    return head
def _A ( __magic_name__ ):
    """Print the data of each ``Node`` reachable from *__magic_name__*, last first.

    Fixed: the obfuscated original referenced the undefined names
    ``head_node`` and ``print_reverse``. Recursion goes through a local
    helper rather than the module-level name, because ``_A`` is rebound by
    later definitions in this file.
    """
    def _walk(node):
        # Recurse to the tail first so the deepest node prints first.
        if node is not None and isinstance(node , Node ):
            _walk(node.next )
            print(node.data )
    _walk(__magic_name__ )
def _A ( ):
    '''Demo entry point: run doctests, build a sample list, print it forward
    and then in reverse.

    NOTE(review): obfuscation damage — ``make_linked_list`` and
    ``print_reverse`` are undefined in this module (both were renamed to
    ``_A``, which this very definition rebinds), and ``__magic_name__`` /
    the ``lowercase__`` target are likewise unbound. TODO restore the
    original distinct function names.
    '''
    from doctest import testmod
    testmod()
    lowercase__ = make_linked_list([14, 52, 14, 12, 43] )
    print("Linked List:" )
    print(__magic_name__ )
    print("Elements in Reverse:" )
    print_reverse(__magic_name__ )
if __name__ == "__main__":
    # NOTE(review): `main` is undefined here — the entry point above was
    # obfuscated to `_A`; confirm the intended callable.
    main()
| 655 | 0 |
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Fail fast: the training CLI needs at least one deep-learning backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
# NOTE(review): both flags are bound to the same obfuscated name ``a_`` —
# the second assignment clobbers the first (presumably ``USE_XLA`` and
# ``USE_AMP`` originally). TODO restore distinct names.
a_ : str = False
a_ : List[str] = False
def __a ( __UpperCAmelCase ):
    """Argparse factory: build the training command from parsed CLI args.

    Fixed: the obfuscated original returned ``TrainCommand(...)``, a name
    that does not exist in this module — the command class defined below is
    ``__UpperCamelCase``.
    """
    return __UpperCamelCase(__UpperCAmelCase )
class __UpperCamelCase ( lowercase_ ):
    """`transformers-cli train` command: registers its argparse subcommand,
    loads a pipeline and CSV datasets, and runs training.

    NOTE(review): obfuscation damage — all methods share the name
    ``_UpperCAmelCase`` (only the last survives at runtime; presumably
    ``register_subcommand``, ``run``, ``run_tf``, ``run_torch``), and in
    ``__init__`` the ``a__ =`` bindings were presumably ``self.<attr> = ...``
    — as written the later reads of ``self.framework``/``self.pipeline``/
    ``self.output`` etc. are never backed by writes. TODO restore from
    transformers' ``TrainCommand``.
    """
    @staticmethod
    def _UpperCAmelCase ( SCREAMING_SNAKE_CASE ) -> int:
        # Register the ``train`` subcommand and all of its CLI options.
        a__ = parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
        train_parser.add_argument(
            '''--train_data''' , type=_lowercase , required=_lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
        train_parser.add_argument(
            '''--column_label''' , type=_lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
        train_parser.add_argument(
            '''--column_text''' , type=_lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
        train_parser.add_argument(
            '''--column_id''' , type=_lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
        train_parser.add_argument(
            '''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
        train_parser.add_argument('''--validation_data''' , type=_lowercase , default='''''' , help='''path to validation dataset.''' )
        train_parser.add_argument(
            '''--validation_split''' , type=_lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
        train_parser.add_argument('''--output''' , type=_lowercase , default='''./''' , help='''path to saved the trained model.''' )
        train_parser.add_argument(
            '''--task''' , type=_lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
        train_parser.add_argument(
            '''--model''' , type=_lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
        train_parser.add_argument('''--train_batch_size''' , type=_lowercase , default=3_2 , help='''Batch size for training.''' )
        train_parser.add_argument('''--valid_batch_size''' , type=_lowercase , default=6_4 , help='''Batch size for validation.''' )
        train_parser.add_argument('''--learning_rate''' , type=_lowercase , default=3e-5 , help='''Learning rate.''' )
        train_parser.add_argument('''--adam_epsilon''' , type=_lowercase , default=1e-0_8 , help='''Epsilon for Adam optimizer.''' )
        train_parser.set_defaults(func=_lowercase )
    def __init__( self , SCREAMING_SNAKE_CASE ) -> Any:
        # Pick a backend, build the task pipeline, and load the CSV datasets.
        a__ = logging.get_logger('''transformers-cli/training''' )
        a__ = '''tf''' if is_tf_available() else '''torch'''
        os.makedirs(args.output , exist_ok=_lowercase )
        a__ = args.output
        a__ = args.column_label
        a__ = args.column_text
        a__ = args.column_id
        self.logger.info(f"Loading {args.task} pipeline for {args.model}" )
        # Only text classification is implemented; the other tasks are stubs.
        if args.task == "text_classification":
            a__ = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"Loading dataset from {args.train_data}" )
        a__ = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        a__ = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}" )
            a__ = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        a__ = args.validation_split
        a__ = args.train_batch_size
        a__ = args.valid_batch_size
        a__ = args.learning_rate
        a__ = args.adam_epsilon
    def _UpperCAmelCase ( self ) -> Tuple:
        # Dispatch to the backend-specific training loop.
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()
    def _UpperCAmelCase ( self ) -> List[Any]:
        # PyTorch training is not implemented in this CLI.
        raise NotImplementedError
    def _UpperCAmelCase ( self ) -> Dict:
        # TensorFlow training: fit the pipeline, then persist it.
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 194 |
import random
from .binary_exp_mod import bin_exp_mod
def _A ( __magic_name__ , __magic_name__=1000 ):
if n < 2:
return False
if n % 2 == 0:
return n == 2
# this means n is odd
lowercase__ = n - 1
lowercase__ = 0
while d % 2 == 0:
d /= 2
exp += 1
# n - 1=d*(2**exp)
lowercase__ = 0
while count < prec:
lowercase__ = random.randint(2 , n - 1 )
lowercase__ = bin_exp_mod(__magic_name__ , __magic_name__ , __magic_name__ )
if b != 1:
lowercase__ = True
for _ in range(__magic_name__ ):
if b == n - 1:
lowercase__ = False
break
lowercase__ = b * b
b %= n
if flag:
return False
count += 1
return True
if __name__ == "__main__":
    # Interactive driver: read an upper bound and list all primes up to it.
    # Fixed: the obfuscated original referenced the undefined names
    # ``is_prime_big`` (the test in this file is bound to ``_A``) and ``n``
    # (the bound is stored in ``_snake_case``).
    _snake_case = abs(int(input("""Enter bound : """).strip()))
    print("""Here's the list of primes:""")
    print(""", """.join(str(i) for i in range(_snake_case + 1) if _A(i)))
| 655 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
# Key/value type variables for the generic LRU cache classes below.
# NOTE(review): both are bound to the same obfuscated name
# ``SCREAMING_SNAKE_CASE_`` (the second clobbers the first) while the class
# bodies use the bare names ``T``/``U`` — presumably the originals were
# ``T = TypeVar("T")`` and ``U = TypeVar("U")``. TODO restore.
SCREAMING_SNAKE_CASE_ = TypeVar("""T""")
SCREAMING_SNAKE_CASE_ = TypeVar("""U""")
class snake_case_ ( Generic[T, U] ):
    '''Doubly linked list node holding a key/value pair for the LRU cache.

    NOTE(review): obfuscation damage — the two ``__init__`` parameters share
    the name ``a_`` (a SyntaxError as written; presumably ``key`` and
    ``val``), and the ``a_ : ... =`` bindings were presumably
    ``self.key``/``self.val``/``self.next``/``self.prev`` — as written the
    constructor stores nothing, so ``__repr__``'s attribute reads fail.
    TODO restore.
    '''
    def __init__( self , a_ , a_ ):
        a_ : Optional[int] = key
        a_ : List[str] = val
        a_ : str = None
        a_ : List[str] = None
    def __repr__( self ):
        '''Debug rendering showing the key, value, and link presence.'''
        return (
            F"""Node: key: {self.key}, val: {self.val}, """
            F"""has next: {bool(self.next )}, has prev: {bool(self.prev )}"""
        )
class snake_case_ ( Generic[T, U] ):
    '''Doubly linked list with sentinel head/rear nodes; most-recently-used
    entries are kept nearest the rear.

    NOTE(review): obfuscation damage — this definition clobbers the node
    class above (both are named ``snake_case_``), the two methods below share
    the name ``snake_case_`` (presumably ``add`` and ``remove``), and the
    ``a_ =`` bindings were the real locals/attributes (``self.head``,
    ``self.rear``, ``previous``, ``node`` links, ...). TODO restore from the
    upstream ``lru_cache`` implementation.
    '''
    def __init__( self ):
        a_ : Optional[Any] = DoubleLinkedListNode(_lowercase , _lowercase )
        a_ : str = DoubleLinkedListNode(_lowercase , _lowercase )
        a_ , a_ : Any = self.rear, self.head
    def __repr__( self ):
        '''Render the list contents head-to-rear, one node per line.'''
        a_ : str = ["DoubleLinkedList"]
        a_ : Optional[int] = self.head
        while node.next is not None:
            rep.append(str(_lowercase ) )
            a_ : Optional[Any] = node.next
        rep.append(str(self.rear ) )
        return ",\n ".join(_lowercase )
    def snake_case_ ( self , a_ ):
        # Insert the node immediately before the rear sentinel (MRU position).
        a_ : Any = self.rear.prev
        # All nodes other than self.head are guaranteed to have non-None previous
        assert previous is not None
        a_ : Optional[Any] = node
        a_ : Any = previous
        a_ : Optional[Any] = node
        a_ : Optional[int] = self.rear
    def snake_case_ ( self , a_ ):
        # Unlink the node and return it; None when the node is not linked.
        if node.prev is None or node.next is None:
            return None
        a_ : Optional[Any] = node.next
        a_ : List[Any] = node.prev
        a_ : List[Any] = None
        a_ : int = None
        return node
class snake_case_ ( Generic[T, U] ):
    '''LRU cache backed by the doubly linked list above, plus a class-level
    decorator factory that memoizes single-argument functions.

    NOTE(review): obfuscation damage — this definition clobbers the list
    class above (same obfuscated name), the three ``snake_case_`` methods
    were presumably ``get``, ``put`` and ``decorator``, ``__init__``'s
    parameter collides with the ``a_`` binding names (presumably
    ``capacity``), and the ``a_ =`` lines were the real attribute
    assignments (``self.list``, ``self.capacity``, ``self.hits``, ...).
    TODO restore from the upstream ``lru_cache`` implementation.
    '''
    __lowerCAmelCase = {}
    def __init__( self , a_ ):
        a_ : Union[str, Any] = DoubleLinkedList()
        a_ : int = capacity
        a_ : Any = 0
        a_ : Optional[int] = 0
        a_ : Any = 0
        a_ : List[Any] = {}
    def __repr__( self ):
        '''Summarize hit/miss counters, capacity and current size.'''
        return (
            F"""CacheInfo(hits={self.hits}, misses={self.miss}, """
            F"""capacity={self.capacity}, current size={self.num_keys})"""
        )
    def __contains__( self , a_ ):
        return key in self.cache
    def snake_case_ ( self , a_ ):
        # get: return the cached value and promote the entry to MRU,
        # or None on a miss.
        if key in self.cache:
            self.hits += 1
            a_ : Union[str, Any] = self.cache[key]
            a_ : str = self.list.remove(self.cache[key] )
            assert node == value_node
            # node is guaranteed not None because it is in self.cache
            assert node is not None
            self.list.add(_lowercase )
            return node.val
        self.miss += 1
        return None
    def snake_case_ ( self , a_ , a_ ):
        # put: insert or update a key, evicting the LRU entry at capacity.
        if key not in self.cache:
            if self.num_keys >= self.capacity:
                # delete first node (oldest) when over capacity
                a_ : Union[str, Any] = self.list.head.next
                # guaranteed to have a non-None first node when num_keys > 0
                # explain to type checker via assertions
                assert first_node is not None
                assert first_node.key is not None
                assert (
                    self.list.remove(_lowercase ) is not None
                )  # node guaranteed to be in list assert node.key is not None
                del self.cache[first_node.key]
                self.num_keys -= 1
            a_ : int = DoubleLinkedListNode(_lowercase , _lowercase )
            self.list.add(self.cache[key] )
            self.num_keys += 1
        else:
            # bump node to the end of the list, update value
            a_ : Union[str, Any] = self.list.remove(self.cache[key] )
            assert node is not None  # node guaranteed to be in list
            a_ : Union[str, Any] = value
            self.list.add(_lowercase )
    @classmethod
    def snake_case_ ( cls , a_ = 1_2_8 ):
        # Decorator factory: memoize a one-argument function with a shared
        # per-function LRU cache instance.
        def cache_decorator_inner(a_ ) -> Callable[..., U]:
            def cache_decorator_wrapper(*a_ ) -> U:
                if func not in cls.decorator_function_to_instance_map:
                    a_ : Optional[Any] = LRUCache(_lowercase )
                a_ : Dict = cls.decorator_function_to_instance_map[func].get(args[0] )
                if result is None:
                    a_ : List[Any] = func(*_lowercase )
                    cls.decorator_function_to_instance_map[func].put(args[0] , _lowercase )
                return result
            def cache_info() -> LRUCache[T, U]:
                # Expose the underlying cache for inspection.
                return cls.decorator_function_to_instance_map[func]
            setattr(_lowercase , "cache_info" , _lowercase )  # noqa: B010
            return cache_decorator_wrapper
        return cache_decorator_inner
if __name__ == "__main__":
    # Run the module doctests when executed directly.
    # Fixed: dataset residue ("| 237 |") was fused onto the testmod() line,
    # making it a SyntaxError.
    import doctest

    doctest.testmod()
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class lowerCAmelCase :
    '''Test mixin for DeepFloyd-IF pipelines: builds tiny dummy components
    and checks save/load round-trips with and without optional components.

    NOTE(review): obfuscation damage — the ``lowercase__ =`` bindings below
    were the real locals (``text_encoder``, ``unet``, ``prompt_embeds``,
    ``pipe``, ...), so several methods reference names that are never bound
    as written. The mixin also expects the concrete test class to provide
    ``self.pipeline_class`` and ``self.get_dummy_inputs`` — neither is
    defined here; TODO confirm against the diffusers IF pipeline tests.
    '''
    def UpperCAmelCase ( self :Optional[Any] ):
        '''Build minimal stage-1 IF components (text encoder, tokenizer,
        UNet, scheduler, watermarker) with fixed seeds for reproducibility.'''
        torch.manual_seed(0 )
        lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        lowercase__ = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        lowercase__ = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def UpperCAmelCase ( self :Union[str, Any] ):
        '''Build minimal stage-2 (super-resolution) IF components; adds an
        image-noising scheduler and a 6-channel UNet input.'''
        torch.manual_seed(0 )
        lowercase__ = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        lowercase__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
        torch.manual_seed(0 )
        lowercase__ = UNetaDConditionModel(
            sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
        unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        torch.manual_seed(0 )
        lowercase__ = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=_lowercase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
        torch.manual_seed(0 )
        lowercase__ = DDPMScheduler(
            num_train_timesteps=10_00 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
        torch.manual_seed(0 )
        lowercase__ = IFWatermarker()
        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def UpperCAmelCase ( self :Any ):
        '''Check that a pipeline with all optional components set to None
        survives a save/load round-trip and produces the same output when
        prompts are pre-encoded to embeddings.'''
        lowercase__ = self.get_dummy_components()
        lowercase__ = self.pipeline_class(**_lowercase )
        pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = self.get_dummy_inputs(_lowercase )
        lowercase__ = inputs["prompt"]
        lowercase__ = inputs["generator"]
        lowercase__ = inputs["num_inference_steps"]
        lowercase__ = inputs["output_type"]
        # Optional image-style inputs differ per pipeline flavour.
        if "image" in inputs:
            lowercase__ = inputs["image"]
        else:
            lowercase__ = None
        if "mask_image" in inputs:
            lowercase__ = inputs["mask_image"]
        else:
            lowercase__ = None
        if "original_image" in inputs:
            lowercase__ = inputs["original_image"]
        else:
            lowercase__ = None
        lowercase__ , lowercase__ = pipe.encode_prompt(_lowercase )
        # inputs with prompt converted to embeddings
        lowercase__ = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            lowercase__ = image
        if mask_image is not None:
            lowercase__ = mask_image
        if original_image is not None:
            lowercase__ = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(_lowercase , _lowercase , _lowercase )
        lowercase__ = pipe(**_lowercase )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_lowercase )
            lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
        pipe_loaded.to(_lowercase )
        pipe_loaded.set_progress_bar_config(disable=_lowercase )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(_lowercase , _lowercase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
        lowercase__ = self.get_dummy_inputs(_lowercase )
        lowercase__ = inputs["generator"]
        lowercase__ = inputs["num_inference_steps"]
        lowercase__ = inputs["output_type"]
        # inputs with prompt converted to embeddings
        lowercase__ = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            lowercase__ = image
        if mask_image is not None:
            lowercase__ = mask_image
        if original_image is not None:
            lowercase__ = original_image
        lowercase__ = pipe_loaded(**_lowercase )[0]
        # Outputs before and after the round-trip must agree closely.
        lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
        self.assertLess(_lowercase , 1e-4 )
    def UpperCAmelCase ( self :List[str] ):
        '''Check that a plain save/load round-trip reproduces the pipeline
        output within tolerance.'''
        lowercase__ = self.get_dummy_components()
        lowercase__ = self.pipeline_class(**_lowercase )
        pipe.to(_lowercase )
        pipe.set_progress_bar_config(disable=_lowercase )
        lowercase__ = self.get_dummy_inputs(_lowercase )
        lowercase__ = pipe(**_lowercase )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(_lowercase )
            lowercase__ = self.pipeline_class.from_pretrained(_lowercase )
        pipe_loaded.to(_lowercase )
        pipe_loaded.set_progress_bar_config(disable=_lowercase )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        lowercase__ = self.get_dummy_inputs(_lowercase )
        lowercase__ = pipe_loaded(**_lowercase )[0]
        lowercase__ = np.abs(to_np(_lowercase ) - to_np(_lowercase ) ).max()
        self.assertLess(_lowercase , 1e-4 )
| 655 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger plus the LED tokenizer vocabulary maps.
# NOTE(review): every binding shares the obfuscated name ``_UpperCamelCase``
# — each assignment clobbers the previous one (presumably ``logger``,
# ``VOCAB_FILES_NAMES``, ``PRETRAINED_VOCAB_FILES_MAP``,
# ``PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES``), which are the names the
# tokenizer class attributes reference. TODO restore distinct names.
_UpperCamelCase : str = logging.get_logger(__name__)
_UpperCamelCase : List[str] = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
_UpperCamelCase : Any = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
_UpperCamelCase : Tuple = {
    "allenai/led-base-16384": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def snake_case ( ) -> dict:
    """Return a mapping from every byte value (0-255) to a printable unicode
    character, as used by byte-level BPE.

    Printable ASCII/Latin-1 bytes map to themselves; the remaining bytes are
    shifted up past 255 so no byte maps to whitespace or a control character
    (which BPE merge code chokes on).

    Fixed vs. the obfuscated original: the locals ``bs``/``cs``/``n`` were
    collapsed into a single throwaway name, and the fallback branch appended
    the function object (``bs.append(snake_case)``) instead of the byte ``b``.
    """
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            # Map this non-printable byte to the next code point above 255.
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def snake_case ( word ) -> set:
    """Return the set of adjacent symbol pairs in *word* (a string or tuple
    of symbols), as consumed by the BPE merge loop.

    Fixed vs. the obfuscated original: the parameter was named after the
    function itself while the body referenced the undefined names
    ``word``/``pairs``/``prev_char``; the locals are restored here.
    NOTE(review): this definition rebinds the module-level ``snake_case``
    (``bytes_to_unicode`` above shares the obfuscated name).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _snake_case ( lowercase_ ):
    """Byte-level BPE tokenizer for LED (Longformer Encoder-Decoder).

    NOTE(review): obfuscation damage — the four class attributes below all
    share the name ``SCREAMING_SNAKE_CASE`` (only the last survives;
    presumably ``vocab_files_names``, ``pretrained_vocab_files_map``,
    ``max_model_input_sizes``, ``model_input_names``), and the base class
    ``lowercase_`` is presumably ``PreTrainedTokenizer``. TODO restore.
    """
    SCREAMING_SNAKE_CASE : Tuple = VOCAB_FILES_NAMES
    SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
    SCREAMING_SNAKE_CASE : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    SCREAMING_SNAKE_CASE : List[str] = ['''input_ids''', '''attention_mask''']
    def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
        '''Load the vocab and BPE merges and normalize the special tokens.

        NOTE(review): obfuscation damage — every parameter shares the name
        ``_SCREAMING_SNAKE_CASE`` (a SyntaxError as written; presumably
        ``vocab_file``, ``merges_file``, ``errors``, ``bos/eos/sep/cls/unk/
        pad/mask_token``, ``add_prefix_space``), and the ``lowerCAmelCase =``
        bindings were presumably ``self.encoder``, ``self.decoder``,
        ``self.byte_encoder``, ``self.bpe_ranks``, ``self.cache``, etc. —
        as written no tokenizer state is stored. TODO restore from
        transformers' ``LEDTokenizer``.
        '''
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else bos_token
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else eos_token
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else sep_token
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else cls_token
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else unk_token
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowerCAmelCase = AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase ) if isinstance(_lowercase , _lowercase ) else mask_token
        super().__init__(
            errors=_lowercase , bos_token=_lowercase , eos_token=_lowercase , unk_token=_lowercase , sep_token=_lowercase , cls_token=_lowercase , pad_token=_lowercase , mask_token=_lowercase , add_prefix_space=_lowercase , **_lowercase , )
        with open(_lowercase , encoding='utf-8' ) as vocab_handle:
            lowerCAmelCase = json.load(_lowercase )
        lowerCAmelCase = {v: k for k, v in self.encoder.items()}
        lowerCAmelCase = errors  # how to handle errors in decoding
        lowerCAmelCase = bytes_to_unicode()
        lowerCAmelCase = {v: k for k, v in self.byte_encoder.items()}
        with open(_lowercase , encoding='utf-8' ) as merges_handle:
            lowerCAmelCase = merges_handle.read().split('\n' )[1:-1]
        lowerCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
        lowerCAmelCase = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
        lowerCAmelCase = {}
        lowerCAmelCase = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowerCAmelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Size of the base vocabulary (excluding added tokens).'''
        return len(self.encoder )
    def _SCREAMING_SNAKE_CASE ( self ):
        '''Full token->id mapping: base vocab merged with added tokens.'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        '''Apply BPE merges to one pre-tokenized token and cache the result.

        NOTE(review): obfuscation damage — the parameter was presumably
        ``token`` (the body reads the unbound name ``token``), the lambda
        parameter should match its body (``_lowercase``), and the
        ``lowerCAmelCase =`` bindings were the real locals (``word``,
        ``pairs``, ``bigram``, ``first``/``second``, ``new_word``, ``i``,
        ``j``). TODO restore from the upstream BPE implementation.
        '''
        if token in self.cache:
            return self.cache[token]
        lowerCAmelCase = tuple(_lowercase )
        lowerCAmelCase = get_pairs(_lowercase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked pair until no mergeable pair remains.
            lowerCAmelCase = min(_lowercase , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_lowercase , float('inf' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowerCAmelCase , lowerCAmelCase = bigram
            lowerCAmelCase = []
            lowerCAmelCase = 0
            while i < len(_lowercase ):
                try:
                    lowerCAmelCase = word.index(_lowercase , _lowercase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowerCAmelCase = j
                if word[i] == first and i < len(_lowercase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowerCAmelCase = tuple(_lowercase )
            lowerCAmelCase = new_word
            if len(_lowercase ) == 1:
                break
            else:
                lowerCAmelCase = get_pairs(_lowercase )
        lowerCAmelCase = ' '.join(_lowercase )
        lowerCAmelCase = word
        return word
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        '''Split text with the pre-tokenization regex, byte-encode each piece
        to printable unicode, and expand it with BPE.

        NOTE(review): obfuscation — the ``lowerCAmelCase`` bindings were
        presumably ``bpe_tokens`` and ``token``; TODO confirm.
        '''
        lowerCAmelCase = []
        for token in re.findall(self.pat , _lowercase ):
            lowerCAmelCase = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_lowercase ).split(' ' ) )
        return bpe_tokens
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        '''Map a token string to its vocabulary id, falling back to the
        unknown token's id.'''
        return self.encoder.get(_lowercase , self.encoder.get(self.unk_token ) )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        '''Map a vocabulary id back to its token string (None when absent).'''
        return self.decoder.get(_lowercase )
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ):
        '''Join tokens and reverse the byte-to-unicode encoding back to text.

        NOTE(review): the ``lowerCAmelCase`` bindings were presumably
        ``text`` (as read on the following lines); TODO confirm.
        '''
        lowerCAmelCase = ''.join(_lowercase )
        lowerCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
        return text
    def _SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
        '''Write the vocab JSON and BPE merges file into *save_directory* and
        return their paths.

        NOTE(review): obfuscation — the parameters were presumably
        ``save_directory`` and ``filename_prefix``; the ``lowerCAmelCase``
        bindings were ``vocab_file``, ``merge_file``, ``index``; and the
        sort lambda's parameter should match its body (``kv``). TODO confirm.
        '''
        if not os.path.isdir(_lowercase ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowerCAmelCase = os.path.join(
            _lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase = os.path.join(
            _lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
        with open(_lowercase , 'w' , encoding='utf-8' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowercase , ensure_ascii=_lowercase ) + '\n' )
        lowerCAmelCase = 0
        with open(_lowercase , 'w' , encoding='utf-8' ) as writer:
            writer.write('#version: 0.2\n' )
            # Merges must be written in rank order; warn if ranks have gaps.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!' )
                    lowerCAmelCase = token_index
                writer.write(' '.join(_lowercase ) + '\n' )
                index += 1
        return vocab_file, merge_file
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
    """Build model inputs from one or two sequences by adding special tokens.

    Single sequence: `<cls> X <sep>`; pair: `<cls> A <sep><sep> B <sep>`.
    (Fix: the original used the same name for both token-id parameters,
    which is a SyntaxError and made the pair branch meaningless.)
    """
    if token_ids_1 is None:
        return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
    cls = [self.cls_token_id]
    sep = [self.sep_token_id]
    return cls + token_ids_0 + sep + sep + token_ids_1 + sep
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
    """Return a mask with 1 at special-token positions and 0 elsewhere.

    If the ids already contain special tokens, delegate to the base class.
    (Fix: the original duplicated parameter names and referenced `_lowercase`.)
    """
    if already_has_special_tokens:
        return super().get_special_tokens_mask(
            token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
        )
    if token_ids_1 is None:
        return [1] + ([0] * len(token_ids_0)) + [1]
    return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
    """Return a zero mask sized like the full special-token sequence.

    RoBERTa-style models do not use token type ids, so every position is 0.
    (Fix: the original duplicated parameter names and referenced `_lowercase`.)
    """
    sep = [self.sep_token_id]
    cls = [self.cls_token_id]
    if token_ids_1 is None:
        return len(cls + token_ids_0 + sep) * [0]
    return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
    """Optionally prepend a space so byte-level BPE treats the first word like any other.

    `add_prefix_space` may be passed per call via kwargs, otherwise the
    tokenizer's default is used. Returns (text, remaining kwargs).
    (Fix: the original duplicated parameter names and referenced `_lowercase`.)
    """
    add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
    if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
        text = " " + text
    return (text, kwargs)
def _pad(
    self,
    encoded_inputs,
    max_length=None,
    padding_strategy=PaddingStrategy.DO_NOT_PAD,
    pad_to_multiple_of=None,
    return_attention_mask=None,
):
    """Pad encoded inputs, additionally padding `global_attention_mask` (LED-style).

    Delegates regular padding to the base class, then pads
    `global_attention_mask` with -1 (which means "local attention", not
    "do not attend") so it matches the length of the primary model input.
    (Fix: the original duplicated parameter names and referenced `_lowercase`.)
    """
    encoded_inputs = super()._pad(
        encoded_inputs=encoded_inputs,
        max_length=max_length,
        padding_strategy=padding_strategy,
        pad_to_multiple_of=pad_to_multiple_of,
        return_attention_mask=return_attention_mask,
    )
    # Load from model defaults
    if return_attention_mask is None:
        return_attention_mask = "attention_mask" in self.model_input_names
    if return_attention_mask and "global_attention_mask" in encoded_inputs:
        required_input = encoded_inputs[self.model_input_names[0]]
        # `global_attention_mask` needs to have the same length as other (sequential) inputs.
        needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
        if needs_to_be_padded:
            difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
            if self.padding_side == "right":
                # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                encoded_inputs["global_attention_mask"] = (
                    encoded_inputs["global_attention_mask"] + [-1] * difference
                )
            elif self.padding_side == "left":
                encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                    "global_attention_mask"
                ]
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
    return encoded_inputs
| 284 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase(unittest.TestCase):
    """Integration test for the TF CamemBERT base model."""

    @slow
    def test_output_embeds_base_model(self):
        # NOTE: renamed with the `test_` prefix so unittest actually discovers it;
        # the original name was never collected. Also fixed the non-existent
        # `tf.intaa`/`tf.floataa` dtypes and undefined `_lowercase` references.
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 655 | 0 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version, operation, requirement_version):
    """Compare an installed library version (or a parsed Version) to a requirement.

    Args:
        library_or_version: library name (its installed version is looked up)
            or an already-parsed `Version`.
        operation: one of the keys of `STR_OPERATION_TO_FUNC` (e.g. ">=", "<").
        requirement_version: version string to compare against.

    (Fix: the original gave both helpers the same mangled name — the second
    shadowed the first and called a nonexistent `compare_versions`.)
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation, version):
    """Compare the installed torch version to `version` using `operation`."""
    return compare_versions(torch_version, operation, version)
| 582 |
_snake_case = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def _A ( __magic_name__ ):
# Make sure the supplied data is a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = f'''a bytes-like object is required, not \'{data.__class__.__name__}\''''
raise TypeError(__magic_name__ )
lowercase__ = "".join(bin(__magic_name__ )[2:].zfill(8 ) for byte in data )
lowercase__ = len(__magic_name__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
lowercase__ = B"=" * ((6 - len(__magic_name__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(__magic_name__ ) % 6)
else:
lowercase__ = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(__magic_name__ ) , 6 ) ).encode()
+ padding
)
def _A ( __magic_name__ ):
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(__magic_name__ , __magic_name__ ) and not isinstance(__magic_name__ , __magic_name__ ):
lowercase__ = (
"argument should be a bytes-like object or ASCII string, "
f'''not \'{encoded_data.__class__.__name__}\''''
)
raise TypeError(__magic_name__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(__magic_name__ , __magic_name__ ):
try:
lowercase__ = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
lowercase__ = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(__magic_name__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
lowercase__ = encoded_data[:-padding]
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
lowercase__ = "".join(
bin(B64_CHARSET.index(__magic_name__ ) )[2:].zfill(6 ) for char in encoded_data )
lowercase__ = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(__magic_name__ ) , 8 )
]
return bytes(__magic_name__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 655 | 0 |
import string

# Frequencies (percent) taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
    "E": 12.70, "T": 9.06, "A": 8.17, "O": 7.51, "I": 6.97, "N": 6.75,
    "S": 6.33, "H": 6.09, "R": 5.99, "D": 4.25, "L": 4.03, "C": 2.78,
    "U": 2.76, "M": 2.41, "W": 2.36, "F": 2.23, "G": 2.02, "Y": 1.97,
    "P": 1.93, "B": 1.29, "V": 0.98, "K": 0.77, "J": 0.15, "X": 0.15,
    "Q": 0.10, "Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
# (Fix: the original assigned all three constants to one clobbered name and
# gave all four functions the same name, shadowing one another; bodies also
# referenced undefined variables.)


def get_letter_count(message):
    """Return a dict mapping each uppercase letter to its count in `message`."""
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x):
    """Sort-key helper: return the first item of a pair."""
    return x[0]


def get_frequency_order(message):
    """Return the 26 letters ordered from most to least frequent in `message`."""
    letter_to_freq = get_letter_count(message)
    freq_to_letter = {freq: [] for letter, freq in letter_to_freq.items()}
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str = {}
    for freq in freq_to_letter:
        # Break ties by reverse ETAOIN order, matching the classic algorithm.
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    freq_order = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(freq_order)


def english_freq_match_score(message):
    """Score 0-12: how many of the 6 most/least common letters of `message`
    match English's most/least common letters (ETAOIN)."""
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase(DiffusionPipeline):
    """Pipeline for class-conditional image generation with a Diffusion
    Transformer (DiT): transformer denoiser + VAE decoder + scheduler.

    (Fix: the original referenced an undefined `_lowercase` throughout and used
    non-existent `torch.floataa`/`torch.intaa` dtypes.)
    """

    def __init__(
        self,
        transformer: TransformeraDModel,
        vae: AutoencoderKL,
        scheduler: KarrasDiffusionSchedulers,
        id2label: Optional[Dict[int, str]] = None,
    ):
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)

        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))

    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        """Map human-readable label name(s) to class id(s); raises for unknown labels."""
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}."
                )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Generate images for `class_labels` with classifier-free guidance
        (enabled when guidance_scale > 1; class id 1000 is the null class)."""
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
| 655 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
__A : Any = logging.get_logger(__name__)
@dataclass
class __snake_case:
    """bitsandbytes quantization configuration (8-bit / 4-bit loading options).

    (Fix: the original collapsed `load_in_8bit` and `load_in_4bit` into one
    attribute, made `is_quantizable` return `x or x`, and referenced an
    undefined `BitsAndBytesConfig` name in `to_diff_dict`.)
    """

    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        # Accept None (-> float32), a dtype name string, or a torch.dtype.
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("bnb_4bit_compute_dtype must be a string or a torch.dtype")

        self.post_init()

    def post_init(self):
        """Validate attribute types; raise ValueError on the first invalid one."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError("llm_int8_threshold must be a float")
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError("llm_int8_skip_modules must be a list of strings")
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean")
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError("llm_int8_has_fp16_weight must be a boolean")
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError("bnb_4bit_compute_dtype must be torch.dtype")
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError("bnb_4bit_quant_type must be a string")
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError("bnb_4bit_use_double_quant must be a boolean")
        if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse(
            "0.39.0"
        ):
            raise ValueError(
                "4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version"
            )

    def is_quantizable(self):
        """True if either 8-bit or 4-bit loading is requested."""
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        """Return 'llm_int8', 'fp4', 'nf4', or None depending on the active flags."""
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        """Build a config from a dict; kwargs matching attributes override it."""
        config = cls(**config_dict)
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        """Write this config as pretty-printed JSON to `json_file_path`."""
        with open(json_file_path, "w", encoding="utf-8") as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
            writer.write(json_string)

    def to_dict(self):
        """Serialize to a dict; the compute dtype is stored by name (e.g. 'float32')."""
        output = copy.deepcopy(self.__dict__)
        output["bnb_4bit_compute_dtype"] = str(output["bnb_4bit_compute_dtype"]).split(".")[1]
        return output

    def __repr__(self):
        return f"{self.__class__.__name__} {self.to_json_string()}"

    def to_json_string(self, use_diff=True):
        """JSON string of this config; with use_diff, only non-default values."""
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self):
        """Return only the entries that differ from a default-constructed config."""
        config_dict = self.to_dict()
        # get the default config dict (use the class itself; the original
        # referenced a `BitsAndBytesConfig` name that does not exist here)
        default_config_dict = self.__class__().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
| 275 |
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class lowerCAmelCase(TestCase):
    """Tests for FeaturesManager.determine_framework (transformers.onnx).

    (Fix: the original base class name was undefined, every method shared one
    name so setUp/helpers shadowed each other, and MagicMock return values /
    expected exceptions were undefined `_lowercase` placeholders.)
    """

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        # Save a tiny PyTorch checkpoint into `save_dir`.
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        # Save a tiny TensorFlow checkpoint (converted from PT) into `save_dir`.
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        """A user-provided framework is returned as-is, local checkpoint or not."""
        mock_framework = "mock_framework"
        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)
        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

    def test_checkpoint_lookup(self):
        """The framework is inferred from the files in a local checkpoint."""
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)
        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)
        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        """With no local checkpoint, availability of torch/TF decides the framework."""
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)
        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)
        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 655 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class lowerCamelCase_(TaskTemplate):
    """Task template for automatic speech recognition: maps an audio input
    column to a transcription label column.

    (Fix: the original froze the dataclass with an undefined name, assigned all
    five class attributes to one clobbered name `a_`, and referenced an
    undefined `_lowercase` in the type check.)
    """

    # `task` is serialized even when left at its default value.
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        """Return a copy of this template whose input schema uses the dataset's
        own Audio feature for the audio column."""
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        # the dataclass is frozen, so write through __dict__
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self):
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 459 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
# (Fix: the module logger was bound to `_snake_case`, so `logger` was undefined
# inside both classes; both classes shared one name and the second instantiated
# a nonexistent `GitVisionConfig` — restored here.)


class GitVisionConfig(PretrainedConfig):
    """Configuration for the GIT vision encoder (CLIP-style ViT)."""

    model_type = "git_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Load the vision config, unwrapping it from a full GIT config if needed."""
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from GITConfig
        if config_dict.get("model_type") == "git":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class lowerCAmelCase(PretrainedConfig):
    """Configuration for the full GIT model (text decoder + vision encoder)."""

    model_type = "git"

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the GitVisionConfig with default values.")
        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        """Serialize to a dict, expanding the nested vision config."""
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 655 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class lowercase__(unittest.TestCase):
    """Tests for MgpstrProcessor (char tokenizer + ViT image processor).

    (Fix: every method in the original shared one mangled name so they shadowed
    each other — in particular `setUp` never ran — and bodies referenced
    undefined `_lowerCamelCase`/`_lowercase` placeholders.)
    """

    image_processor_class = ViTImageProcessor if is_vision_available() else None

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def setUp(self):
        self.image_size = (3, 32, 128)
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        image_processor_map = {
            "do_normalize": False,
            "do_resize": True,
            "image_processor_type": "ViTImageProcessor",
            "resample": 3,
            "size": {"height": 32, "width": 128},
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a single random PIL image."""
        image_input = np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)
        image_input = Image.fromarray(np.moveaxis(image_input, 0, -1))
        return image_input

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        processor = MgpstrProcessor.from_pretrained(self.tmpdirname, use_fast=False)
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        tokenizer = self.get_tokenizer()
        image_processor = self.get_image_processor()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)
        processor = MgpstrProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )
        self.assertEqual(processor.char_tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer, MgpstrTokenizer)
        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = MgpstrProcessor(tokenizer=tokenizer, image_processor=image_processor)
        input_str = "test"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])
def UpperCamelCase_ ( self) -> List[Any]:
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : Tuple = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
_lowerCamelCase : str = """test"""
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ["""pixel_values""", """labels"""])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def UpperCamelCase_ ( self) -> str:
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Tuple = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
_lowerCamelCase : Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : Dict = processor.char_decode(_lowercase)
_lowerCamelCase : Tuple = tokenizer.batch_decode(_lowercase)
_lowerCamelCase : Any = [seq.replace(""" """ , """""") for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : Optional[int] = self.get_tokenizer()
_lowerCamelCase : List[Any] = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
_lowerCamelCase : Dict = None
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Union[str, Any] = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Tuple = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : str = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
_lowerCamelCase : Any = torch.randn(1 , 27 , 38)
_lowerCamelCase : Any = torch.randn(1 , 27 , 5_0257)
_lowerCamelCase : str = torch.randn(1 , 27 , 3_0522)
_lowerCamelCase : Union[str, Any] = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""])
| 88 |
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class lowerCAmelCase ( unittest.TestCase ):
    """PT<->TF cross-loading tests for the TF Auto* factory classes.

    Every TF Auto* factory must load a PyTorch checkpoint (``from_pt=True``) and every
    PT Auto* factory must load a TF checkpoint (``from_tf=True``), returning the
    architecture-specific head class.

    Fixes: the ten methods were all mangled to the same name (so later defs clobbered
    earlier ones) and referenced the undefined placeholder ``_lowercase`` everywhere;
    names and locals below restore the upstream intent.
    """

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertModel)

            model = AutoModel.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForPreTraining)

            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPTaConfig)

            model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPTaLMHeadModel)

            model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForCausalLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, GPTaLMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

            model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TaConfig)

            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name, from_pt=True)
            model, loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTaForConditionalGeneration)

            model = AutoModelForSeqaSeqLM.from_pretrained(model_name, from_tf=True)
            model, loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name, output_loading_info=True, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TaForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForQuestionAnswering)

    def test_from_pretrained_identifier(self):
        # tiny fixture checkpoint with a known parameter count
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        # repo name does not reveal the model type; AutoModel must read it from the config
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
| 655 | 0 |
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Compute the Jaccard similarity coefficient of two collections.

    For sets: |A ∩ B| / |A ∪ B|.  For lists/tuples the same ratio is computed with
    order-preserving scans.  With ``alternative_union=True`` the denominator is
    ``len(A) + len(B)`` instead of the true union size.

    Fixes: the mangled signature repeated one parameter name three times (a
    SyntaxError) and shadowed the loop values inside ``isinstance``; the name is
    restored to what the ``__main__`` block below calls.

    Returns None for unsupported input types.
    """
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))
        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))
        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]
        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            # union preserves set_a's order, then appends set_b's new elements
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    # demo: similarity of two overlapping sets (prints 0.375)
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
    print(jaccard_similarity(set_a, set_b))
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
# module-level logger used throughout the conversion script (was mis-assigned to a placeholder name)
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# Fixes: the list was initialized under a placeholder name while every append used
# ``rename_keys`` (NameError); appends are grouped into data-driven loops.
rename_keys = []
for i in range(6):
    # encoder layer i: attention output projection, 2 feedforward linears, 2 layernorms
    for src, dest in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "final_layer_norm.weight"),
        ("norm2.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.encoder.layers.{i}.{src}", f"encoder.layers.{i}.{dest}"))
    # decoder layer i: self- and cross-attention output projections, 2 feedforwards, 3 layernorms
    for src, dest in [
        ("self_attn.out_proj.weight", "self_attn.out_proj.weight"),
        ("self_attn.out_proj.bias", "self_attn.out_proj.bias"),
        ("multihead_attn.out_proj.weight", "encoder_attn.out_proj.weight"),
        ("multihead_attn.out_proj.bias", "encoder_attn.out_proj.bias"),
        ("linear1.weight", "fc1.weight"),
        ("linear1.bias", "fc1.bias"),
        ("linear2.weight", "fc2.weight"),
        ("linear2.bias", "fc2.bias"),
        ("norm1.weight", "self_attn_layer_norm.weight"),
        ("norm1.bias", "self_attn_layer_norm.bias"),
        ("norm2.weight", "encoder_attn_layer_norm.weight"),
        ("norm2.bias", "encoder_attn_layer_norm.bias"),
        ("norm3.weight", "final_layer_norm.weight"),
        ("norm3.bias", "final_layer_norm.bias"),
    ]:
        rename_keys.append((f"transformer.decoder.layers.{i}.{src}", f"decoder.layers.{i}.{dest}"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move ``state_dict[old]`` to ``state_dict[new]`` in place.

    Fixes: mangled signature repeated one parameter name (SyntaxError) and the dict
    write-back was lost; renamed to the name the conversion function calls.
    """
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Return a new OrderedDict with torchvision backbone keys moved under the HF conv-encoder path.

    Fixes: the result dict was never populated (all writes collapsed into a dead
    local) and the returned name was undefined; renamed to match the call site.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    """Split each fused attention in_proj weight/bias into separate q/k/v projections, in place.

    The original checkpoint stores query/key/value as one fused (768, 256) matrix and
    a (768,) bias per attention layer; the HF model expects three separate 256-row
    projections.  Fused layout is [q; k; v] along dim 0.

    Fixes: every ``state_dict[...] = ...`` write had been collapsed into a dead local,
    so nothing was ever stored; renamed to match the call site.
    """
    prefix = ""
    # first: transformer encoder self-attention
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (self-attention + cross-attention)
    for i in range(6):
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # cross-attention projections
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Resize a PIL image so its longest side is 800 (detection) or 1000 (structure) pixels.

    Fixes: the mangled signature repeated one parameter name (SyntaxError) and the
    intermediate locals were lost; renamed to the name the conversion function calls.
    """
    width, height = image.size
    current_max_size = max(width, height)
    # detection checkpoints were trained at max side 800, structure ones at 1000
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    """Convert a PIL image to a tensor and normalize it with the ImageNet mean/std.

    Fixes: intermediate results were assigned to a dead local; renamed to the name
    the conversion function calls.
    """
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Convert an original Table Transformer checkpoint to the HF format and verify it.

    Downloads the checkpoint from ``checkpoint_url``, renames/splits its weights,
    builds the HF model + image processor, checks the outputs on a reference image,
    and optionally saves to ``pytorch_dump_folder_path`` and/or pushes to the hub.

    Fixes: the mangled signature repeated one parameter name (SyntaxError) and all
    assignment targets were placeholder locals, so the function could never run;
    renamed to the name the ``__main__`` block calls.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        idalabel = {0: "table", 1: "table rotated"}
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        idalabel = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = idalabel
        config.label2id = {v: k for k, v in idalabel.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion on a reference image
    file_name = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=file_name)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor(
            [[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor(
            [[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint URL, optional output folder, and hub flag.
    # Fixes: parser/args were assigned to a placeholder name while the following
    # lines used ``parser``/``args`` (NameError).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 655 | 0 |
"""simple docstring"""
def _snake_case ( lowerCamelCase__ : Any ) -> str:
if any(not isinstance(lowerCamelCase__ , lowerCamelCase__ ) or x < 0 for x in sequence ):
raise TypeError("Sequence must be list of non-negative integers" )
for _ in range(len(lowerCamelCase__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(lowerCamelCase__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
    # smoke checks for the module's sorter (stray dataset artifact removed)
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# lazily-imported public API of the byt5 package
# Fixes: the structure dict was assigned to a placeholder while _LazyModule received
# ``_import_structure``; the lazy module must be installed into sys.modules; and the
# TYPE_CHECKING import now matches the module/class declared in the structure.
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 655 | 0 |
"""simple docstring"""
from PIL import Image
def mean_threshold(image):
    """Binarize a greyscale PIL image around its mean pixel value, in place, and return it.

    Pixels strictly above the mean become 255, the rest become 0.

    Fixes: every ``range(...)`` iterated over the image object itself instead of its
    dimensions; renamed to the name the ``__main__`` block calls.
    NOTE(review): PIL's ``Image.size`` is (width, height); the upstream algorithm
    unpacks it as ``height, width``, which only matters for non-square images —
    behavior kept as upstream, confirm intent.
    """
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image
if __name__ == "__main__":
    # binarize a greyscale input image and write the result out
    # Fixes: the result was bound to a placeholder name while ``image.save`` used ``image``.
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 572 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# usage example injected into the pipeline __call__ docstring via @replace_example_docstring
# Fixes: both module constants were assigned to the same placeholder name, clobbering
# each other and leaving ``logger``/``EXAMPLE_DOC_STRING`` undefined for later use.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")
        >>> pipe_prior.to("cuda")
        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> zero_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")
        >>> pipe.to("cuda")
        >>> image = pipe(
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=zero_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=50,
        ... ).images
        >>> image[0].save("cat.png")
        ```
"""
def downscale_height_and_width(height, width, scale_factor=8):
    """Round (height, width) up to the nearest multiple of ``scale_factor**2``, then
    divide by ``scale_factor`` — the latent resolution the MoVQ decoder expects.

    Fixes: the mangled signature repeated one parameter name (SyntaxError) and the
    quotient locals were lost before the ``+= 1`` adjustments; renamed to the name
    the pipeline's __call__ uses.
    """
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
class lowerCAmelCase ( lowercase_ ):
def __init__( self :List[str] , _lowercase :UNetaDConditionModel , _lowercase :DDPMScheduler , _lowercase :VQModel , ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=_lowercase , scheduler=_lowercase , movq=_lowercase , )
lowercase__ = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def UpperCAmelCase ( self :Union[str, Any] , _lowercase :Tuple , _lowercase :List[str] , _lowercase :Tuple , _lowercase :Optional[Any] , _lowercase :int , _lowercase :str ):
'''simple docstring'''
if latents is None:
lowercase__ = randn_tensor(_lowercase , generator=_lowercase , device=_lowercase , dtype=_lowercase )
else:
if latents.shape != shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
lowercase__ = latents.to(_lowercase )
lowercase__ = latents * scheduler.init_noise_sigma
return latents
def UpperCAmelCase ( self :int , _lowercase :int=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
lowercase__ = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_lowercase , _lowercase )
def UpperCAmelCase ( self :Optional[int] , _lowercase :Tuple=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
lowercase__ = torch.device(f'''cuda:{gpu_id}''' )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_lowercase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
lowercase__ = None
for cpu_offloaded_model in [self.unet, self.movq]:
lowercase__ , lowercase__ = cpu_offload_with_hook(_lowercase , _lowercase , prev_module_hook=_lowercase )
# We'll offload the last model manually.
lowercase__ = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase ( self :Optional[int] ):
'''simple docstring'''
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_lowercase , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
# NOTE(review): the decorator argument was an unbound placeholder; upstream
# diffusers pipelines pass the module-level EXAMPLE_DOC_STRING — confirm.
@replace_example_docstring(EXAMPLE_DOC_STRING )
def __call__(
    self :int ,
    image_embeds :Union[torch.FloatTensor, List[torch.FloatTensor]] ,
    negative_image_embeds :Union[torch.FloatTensor, List[torch.FloatTensor]] ,
    height :int = 5_12 ,
    width :int = 5_12 ,
    num_inference_steps :int = 1_00 ,
    guidance_scale :float = 4.0 ,
    num_images_per_prompt :int = 1 ,
    generator :Optional[Union[torch.Generator, List[torch.Generator]]] = None ,
    latents :Optional[torch.FloatTensor] = None ,
    output_type :Optional[str] = "pil" ,
    return_dict :bool = True ,
):
    """Run the image-embedding-conditioned diffusion decoder.

    Parameter names were reconstructed from the body's right-hand-side
    references (the mangled original declared every parameter as `_lowercase`,
    which is a SyntaxError).

    Args:
        image_embeds: CLIP image embeddings conditioning the unet.
        negative_image_embeds: embeddings used for classifier-free guidance.
        height, width: output image size in pixels.
        num_inference_steps: number of denoising steps.
        guidance_scale: >1.0 enables classifier-free guidance.
        num_images_per_prompt: images generated per embedding.
        generator: RNG(s) for latent initialisation / scheduler noise.
        latents: optional pre-made initial latents.
        output_type: one of "pt", "np", "pil".
        return_dict: return an ImagePipelineOutput instead of a tuple.

    Returns:
        ImagePipelineOutput or a one-tuple of images.
    """
    device = self._execution_device
    do_classifier_free_guidance = guidance_scale > 1.0

    if isinstance(image_embeds , list ):
        image_embeds = torch.cat(image_embeds , dim=0 )
    batch_size = image_embeds.shape[0] * num_images_per_prompt
    if isinstance(negative_image_embeds , list ):
        negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )

    if do_classifier_free_guidance:
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        # unconditional embeddings first, matching the chunk(2) split below
        image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )

    self.scheduler.set_timesteps(num_inference_steps , device=device )
    timesteps = self.scheduler.timesteps

    num_channels_latents = self.unet.config.in_channels
    height, width = downscale_height_and_width(height , width , self.movq_scale_factor )

    # create initial latent
    latents = self.prepare_latents(
        (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )

    for i, t in enumerate(self.progress_bar(timesteps ) ):
        # expand the latents if we are doing classifier free guidance
        latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents

        added_cond_kwargs = {"image_embeds": image_embeds}
        noise_pred = self.unet(
            sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]

        if do_classifier_free_guidance:
            noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
            noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
            _, variance_pred_text = variance_pred.chunk(2 )
            noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
            noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )

        if not (
            hasattr(self.scheduler.config , "variance_type" )
            and self.scheduler.config.variance_type in ["learned", "learned_range"]
        ):
            # scheduler does not consume the predicted variance; drop it
            noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )

        # compute the previous noisy sample x_t -> x_t-1
        latents = self.scheduler.step(
            noise_pred , t , latents , generator=generator , )[0]

    # post-processing
    image = self.movq.decode(latents , force_not_quantize=True )["sample"]
    if output_type not in ["pt", "np", "pil"]:
        raise ValueError(f'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
    if output_type in ["np", "pil"]:
        # movq decodes into [-1, 1]; map to [0, 1]
        image = image * 0.5 + 0.5
        image = image.clamp(0 , 1 )
        image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
    if output_type == "pil":
        image = self.numpy_to_pil(image )
    if not return_dict:
        return (image,)
    return ImagePipelineOutput(images=image )
| 655 | 0 |
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 100_0000, n_limit: int = 10) -> int:
    """Project Euler 174: count hollow square laminae types.

    A lamina uses ``outer**2 - hole**2`` tiles where ``outer`` and ``hole``
    have the same parity and ``hole <= outer - 2``.  Returns how many tile
    counts ``n <= t_limit`` can be formed in between 1 and ``n_limit``
    distinct ways.

    Args:
        t_limit: maximum number of tiles available.
        n_limit: maximum number of distinct laminae per tile count.

    (The mangled original declared both parameters with the same name — a
    SyntaxError — used the first argument as the defaultdict factory, and
    hard-coded 10 instead of ``n_limit``.)
    """
    count = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            # smallest hole keeping the tile count within t_limit
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)


if __name__ == "__main__":
    print(f'{solution() = }')
| 194 |
import inspect
import unittest
class lowerCAmelCase ( unittest.TestCase ):
    """Sanity checks on the diffusers dependency-versions table."""

    def UpperCAmelCase ( self :int ):
        """diffusers itself must be importable."""
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    # NOTE(review): this was originally a second method with the same name,
    # which silently shadowed the first one; renamed so both survive.
    def UpperCAmelCase_backends_in_deps_table ( self :Optional[Any] ):
        """Every backend referenced by a dummy object must be in the deps table."""
        import diffusers
        from diffusers.dependency_versions_table import deps

        # The mangled original passed the unbound placeholder `_lowercase`
        # to getmembers and assigned the pip-name remapping to a dead variable.
        all_classes = inspect.getmembers(diffusers , inspect.isclass )
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    # pip package names differ from the backend import names
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f'''{backend} is not in the deps table!'''
| 655 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
    """Builds small RoFormer configs and random inputs for the Flax model tests.

    NOTE(review): identifiers reconstructed from the bodies' right-hand-side
    references and the ``FlaxRoFormerModelTester(self)`` call in the test
    class below; the mangled original declared every __init__ parameter as
    ``a_`` (a SyntaxError) and gave both helper methods the same name.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=9_9,
        hidden_size=3_2,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=1_6,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids/masks plus a small RoFormerConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Same as above but with the inputs packed into a kwargs dict."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest ( FlaxModelTesterMixin ,unittest.TestCase ):
    """Common Flax model tests applied to all RoFormer head classes.

    NOTE(review): the mangled original assigned two different values to the
    same (name-mangled) class attribute and gave both methods the same name;
    attribute/method names restored per the upstream FlaxRoFormerModelTest —
    confirm `test_head_masking` against transformers.
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self )

    @slow
    def test_model_from_pretrained(self):
        """Each head class loads the small pretrained checkpoint and runs."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class snake_case_ ( unittest.TestCase ):
    """Integration test for the pretrained base Chinese RoFormer MLM head."""

    # NOTE(review): local names restored from the right-hand-side references
    # (the mangled original bound results to `a_` and then read `model`,
    # `output`, etc., which were unbound); method renamed so unittest
    # discovery picks it up — confirm name against upstream.
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base" )
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]

        vocab_size = 5_0_0_0_0
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape , expected_shape )

        expected_slice = jnp.array(
            [[[-0.1_205, -1.0_265, 0.2_922], [-1.5_134, 0.1_974, 0.1_519], [-5.0_135, -3.9_003, -0.8_404]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable state object carried alongside the stateless Flax DDPM scheduler.

    NOTE(review): field and class names reconstructed — the mangled original
    annotated every field as the same placeholder and named the classmethod
    inconsistently with the ``DDPMSchedulerState.create(...)`` call site below.
    """

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        """Build a state with the given common values; num_inference_steps unset."""
        return cls(common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps )
@dataclass
class FlaxDDPMSchedulerOutput ( FlaxSchedulerOutput ):
    """Scheduler step output that also carries the updated scheduler state."""

    # string annotation: the state class may still carry its mangled name
    state: "DDPMSchedulerState"
class lowerCAmelCase ( FlaxSchedulerMixin , ConfigMixin ):
    """Flax DDPM (Ho et al., 2020) noise scheduler.

    NOTE(review): the mangled original gave every method the same name (so
    later defs shadowed earlier ones), declared duplicate ``_lowercase``
    parameters (a SyntaxError) and dropped assignment targets; names were
    reconstructed from the visible bodies and upstream
    ``FlaxDDPMScheduler``. Base classes restored from the imports above.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        # mutable values live in an explicit DDPMSchedulerState object
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 10_00,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,  # `jnp.floataa` in the mangled source; float32 per upstream — confirm
    ):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> "DDPMSchedulerState":
        """Create the initial scheduler state (full training timestep grid)."""
        if common is None:
            common = CommonSchedulerState.create(self )

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0 , dtype=self.dtype )

        timesteps = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]

        return DDPMSchedulerState.create(
            common=common , init_noise_sigma=init_noise_sigma , timesteps=timesteps , )

    def scale_model_input(self, state: "DDPMSchedulerState", sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        """DDPM does not rescale the model input."""
        return sample

    def set_timesteps(self, state: "DDPMSchedulerState", num_inference_steps: int, shape: Tuple = ()) -> "DDPMSchedulerState":
        """Pick ``num_inference_steps`` evenly spaced timesteps, descending."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0 , num_inference_steps ) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps , timesteps=timesteps , )

    def _get_variance(self, state: "DDPMSchedulerState", t, predicted_variance=None, variance_type=None):
        """Posterior variance at step ``t`` under the configured variance type."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between min and max log-variance with the prediction
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: "DDPMSchedulerState",
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ):
        """One reverse-diffusion step: predict x_{t-1} from x_t and the model output."""
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0 )

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output , sample.shape[1] , axis=1 )
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                " for the FlaxDDPMScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample , -1 , 1 )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key , num=1 )
            noise = jax.random.normal(split_key , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(state , t , predicted_variance=predicted_variance ) ** 0.5) * noise

        # no noise is added at t == 0
        variance = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample , state=state )

    def add_noise(
        self,
        state: "DDPMSchedulerState",
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Forward-diffuse clean samples to the given timesteps."""
        return add_noise_common(state.common , original_samples , noise , timesteps )

    def get_velocity(
        self,
        state: "DDPMSchedulerState",
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Velocity target used by v-prediction training."""
        return get_velocity_common(state.common , sample , noise , timesteps )

    def __len__(self):
        return self.config.num_train_timesteps
| 655 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
    """Integration test: TF CamemBERT base reproduces known hidden states."""

    # NOTE(review): local names restored from the right-hand-side references
    # (the mangled original read unbound `_lowercase`), dtypes restored from
    # the mangled `tf.intaa`/`tf.floataa`, and the method renamed so unittest
    # discovery runs it — confirm name against upstream.
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )

        input_ids = tf.convert_to_tensor(
            [[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"

        output = model(input_ids )['last_hidden_state']
        expected_shape = tf.TensorShape((1, 10, 7_68) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 284 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# NOTE(review): the three module constants were all collapsed to `_snake_case`
# by an automated rename, so only the last assignment survived while the code
# below refers to `logger` and `MODEL_CONFIG_CLASSES`. Intended names restored;
# `_snake_case` kept bound for backward compatibility.
logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
_snake_case = MODEL_TYPES
@dataclass
class lowerCAmelCase :
    """Arguments controlling how SQuAD data is located and converted to features.

    NOTE(review): field names and annotations reconstructed from the upstream
    ``SquadDataTrainingArguments``; the mangled original assigned every field
    to the same (un-annotated) name, so none of them were dataclass fields.
    """

    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split ( Enum ):
    """Dataset split names.

    Name restored from the ``Split.train`` / ``Split.dev`` references in the
    dataset class below; the mangled original assigned both members to the
    same attribute name, so only the second survived.
    """

    train = 'train'
    dev = 'dev'
class lowerCAmelCase ( Dataset ):
    """PyTorch dataset wrapping SQuAD examples/features with on-disk caching.

    NOTE(review): parameter and local names were reconstructed from the
    right-hand-side references; the mangled original declared duplicate
    ``_lowercase`` parameters (a SyntaxError) and unbound locals.  The
    processor names remain ``SquadVaProcessor`` because the import line above
    was mangled the same way (upstream: SquadV1Processor / SquadV2Processor).
    """

    args: "SquadDataTrainingArguments"
    features: "List[SquadFeatures]"
    mode: "Split"
    is_language_sensitive: bool

    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset" , None )
                self.examples = self.old_features.get("examples" , None )
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        " future run" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )

    def __len__(self):
        return len(self.features )

    def __getitem__(self, i):
        """Convert the i-th feature to a dict of tensors for the model."""
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        # these model families do not use token type ids
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible} )
            if self.is_language_sensitive:
                # torch.intaa in the mangled source; int64 per upstream — confirm
                inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({"start_positions": start_positions, "end_positions": end_positions} )

        return inputs
| 655 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowerCAmelCase_ ( ConfigTester ):
    """Config tester override for MobileViTV2.

    NOTE(review): base class restored from the ``ConfigTester`` import (the
    original base was an unbound placeholder) and the method renamed to the
    hook ``ConfigTester`` dispatches to — confirm against upstream.
    """

    def create_and_test_config_common_properties( self : Union[str, Any] ):
        """The MobileViTV2 config must expose `width_multiplier`."""
        # the mangled original tested hasattr on an unbound placeholder
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """width_multiplier""" ) )
class lowerCAmelCase_ :
    """simple docstring"""
    # NOTE(review): helper that builds tiny MobileViTV2 configs/inputs for the
    # model tests. An automated rename pass collapsed every __init__ parameter
    # to `SCREAMING_SNAKE_CASE__` (duplicate parameter names are a SyntaxError),
    # every method name to `__a` (so later defs shadow earlier ones) and every
    # assignment target to `__a`, while the right-hand sides keep the intended
    # names (parent, batch_size, ...). The original identifiers must be
    # restored from upstream before this class can run; comments below record
    # the apparent intent only.
    def __init__( self : Any , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any=1_3 , SCREAMING_SNAKE_CASE__ : Any=6_4 , SCREAMING_SNAKE_CASE__ : Optional[int]=2 , SCREAMING_SNAKE_CASE__ : List[str]=3 , SCREAMING_SNAKE_CASE__ : Tuple="swish" , SCREAMING_SNAKE_CASE__ : int=3 , SCREAMING_SNAKE_CASE__ : str=3_2 , SCREAMING_SNAKE_CASE__ : Tuple=0.1 , SCREAMING_SNAKE_CASE__ : int=0.0_2 , SCREAMING_SNAKE_CASE__ : str=True , SCREAMING_SNAKE_CASE__ : Tuple=True , SCREAMING_SNAKE_CASE__ : List[str]=1_0 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Dict=0.2_5 , SCREAMING_SNAKE_CASE__ : List[Any]=0.0 , SCREAMING_SNAKE_CASE__ : str=0.0 , ):
        '''simple docstring'''
        # Intended: store the tester knobs (image size, channels, dropout, ...).
        __a = parent
        __a = batch_size
        __a = image_size
        __a = patch_size
        __a = num_channels
        # last hidden size scales with the width multiplier, rounded to /8
        __a = make_divisible(5_1_2 * width_multiplier , divisor=8 )
        __a = hidden_act
        __a = conv_kernel_size
        __a = output_stride
        __a = classifier_dropout_prob
        __a = use_labels
        __a = is_training
        __a = num_labels
        __a = initializer_range
        __a = scope
        __a = width_multiplier
        __a = ffn_dropout
        __a = attn_dropout
    # Intended: prepare_config_and_inputs — random pixels plus optional labels.
    def __a ( self : Tuple ):
        '''simple docstring'''
        __a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        __a = None
        __a = None
        if self.use_labels:
            __a = ids_tensor([self.batch_size] , self.num_labels )
            __a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        __a = self.get_config()
        return config, pixel_values, labels, pixel_labels
    # Intended: get_config. Reads self.ffn_dropout_prob/attn_dropout_prob,
    # which the (mangled) __init__ never sets — presumably ffn_dropout /
    # attn_dropout upstream; TODO confirm.
    def __a ( self : Dict ):
        '''simple docstring'''
        return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
    # Intended: create_and_check_model — base model output shape check.
    def __a ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
        '''simple docstring'''
        __a = MobileViTVaModel(config=_lowercase )
        model.to(_lowercase )
        model.eval()
        __a = model(_lowercase )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Intended: create_and_check_for_image_classification.
    def __a ( self : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
        '''simple docstring'''
        __a = self.num_labels
        __a = MobileViTVaForImageClassification(_lowercase )
        model.to(_lowercase )
        model.eval()
        __a = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Intended: create_and_check_for_semantic_segmentation — with and without labels.
    def __a ( self : str , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
        '''simple docstring'''
        __a = self.num_labels
        __a = MobileViTVaForSemanticSegmentation(_lowercase )
        model.to(_lowercase )
        model.eval()
        __a = model(_lowercase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
        __a = model(_lowercase , labels=_lowercase )
        self.parent.assertEqual(
            result.logits.shape , (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) , )
    # Intended: prepare_config_and_inputs_for_common — pack inputs into a dict.
    def __a ( self : List[Any] ):
        '''simple docstring'''
        __a = self.prepare_config_and_inputs()
        __a , __a , __a , __a = config_and_inputs
        __a = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( lowercase_ , lowercase_ , unittest.TestCase ):
    """simple docstring"""
    # NOTE(review): MobileViTV2 model test-suite. The same rename pass collapsed
    # the class attribute names to `a_` (later assignments shadow earlier ones,
    # so only the last `False` survives), most method names to `__a` (only the
    # last def survives; none start with `test_`, so unittest would discover
    # nothing) and several call arguments to the unbound placeholder
    # `_lowercase`. The base classes `lowercase_` are also unbound —
    # presumably ModelTesterMixin / PipelineTesterMixin from the imports above;
    # confirm before restoring.
    a_ :Optional[Any] =(
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    a_ :Optional[Any] =(
        {
            """feature-extraction""": MobileViTVaModel,
            """image-classification""": MobileViTVaForImageClassification,
            """image-segmentation""": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    a_ :List[str] =False
    a_ :Optional[Any] =False
    a_ :Optional[Any] =False
    a_ :str =False
    # Intended: setUp — build the model tester and config tester.
    def __a ( self : List[Any] ):
        '''simple docstring'''
        __a = MobileViTVaModelTester(self )
        __a = MobileViTVaConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
    # Intended: test_config.
    def __a ( self : int ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    @unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
    def __a ( self : Optional[Any] ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
    def __a ( self : List[str] ):
        '''simple docstring'''
        pass
    @unittest.skip(reason="""MobileViTV2 does not output attentions""" )
    def __a ( self : int ):
        '''simple docstring'''
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
    def __a ( self : Any ):
        '''simple docstring'''
        pass
    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __a ( self : List[str] ):
        '''simple docstring'''
        pass
    # Intended: test_forward_signature — first forward arg must be pixel_values.
    def __a ( self : Union[str, Any] ):
        '''simple docstring'''
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = model_class(_lowercase )
            __a = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __a = [*signature.parameters.keys()]
            __a = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )
    # Intended: test_model.
    def __a ( self : List[Any] ):
        '''simple docstring'''
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )
    # Intended: test_hidden_states_output — 5 feature maps, halving resolution.
    def __a ( self : Any ):
        '''simple docstring'''
        def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
            __a = model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                __a = model(**self._prepare_for_class(_lowercase , _lowercase ) )
            __a = outputs.hidden_states
            __a = 5
            self.assertEqual(len(_lowercase ) , _lowercase )
            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            __a = 2
            for i in range(len(_lowercase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        __a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __a = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __a = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
    # Intended: test_for_image_classification.
    def __a ( self : List[str] ):
        '''simple docstring'''
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )
    # Intended: test_for_semantic_segmentation.
    def __a ( self : Tuple ):
        '''simple docstring'''
        __a = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
    # Intended: test_model_from_pretrained — first hub checkpoint loads.
    @slow
    def __a ( self : List[Any] ):
        '''simple docstring'''
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __a = MobileViTVaModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
def __lowercase ( ) -> Optional[Any]:
    """Load the COCO cats fixture image used by the integration tests below."""
    fixture_path = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
    return Image.open(fixture_path )
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests that run released MobileViTV2 checkpoints on the
    COCO fixture image and compare against golden values.

    Fixes from review: the cached property was named ``__a`` while the tests
    reference ``self.default_image_processor`` (AttributeError), locals such as
    ``model``/``inputs``/``outputs``/``logits``/``segmentation`` were read but
    never bound, and ``_lowercase`` (the device) was undefined — restored to the
    module-level ``torch_device``.
    """

    @cached_property
    def default_image_processor( self ):
        """Image processor matching the ImageNet-1k checkpoint, or None when the
        vision extras are unavailable (tests are then skipped upstream)."""
        return (
            MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head( self ):
        """Classification head: logits shape (1, 1000) and a golden 3-value slice."""
        model = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_inference_semantic_segmentation( self ):
        """Segmentation head: logits shape (1, 21, 32, 32) and a golden 3x3x3 slice."""
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1e-4 ) )

    @slow
    def test_post_processing_semantic_segmentation( self ):
        """post_process_semantic_segmentation honours target_sizes and otherwise
        falls back to the raw logit resolution."""
        model = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # post-processing runs on CPU tensors
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
| 582 |
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece marker prepended to word-initial pieces.
SPIECE_UNDERLINE = "▁"

RESOURCE_FILES_NAMES = {"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"}

VOCAB_FILES_NAMES = {
    "sentencepiece_model_file": "sentencepiece.bpe.model",
    "vocab_file": "vocab.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt",
    },
    "sentencepiece_model_file": {
        "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
        "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ernie-m-base": 514,
    "ernie-m-large": 514,
}

PRETRAINED_INIT_CONFIGURATION = {
    "ernie-m-base": {"do_lower_case": False},
    "ernie-m-large": {"do_lower_case": False},
}


class lowerCAmelCase ( PreTrainedTokenizer ):
    """ErnieM tokenizer backed by a SentencePiece model plus an optional
    plain-text vocabulary file.

    Reconstructed from review: the original had duplicate parameter names
    (SyntaxError), every method bound to one shared name (shadowing), an
    undefined base class and undefined module constants; method/parameter
    names are restored to the PreTrainedTokenizer contract.
    """

    # Ernie-M has no token-type embedding, so only input_ids feed the model.
    model_input_names: List[str] = ["input_ids"]
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    resource_files_names = RESOURCE_FILES_NAMES

    def __init__(
        self,
        sentencepiece_model_ckpt,
        vocab_file=None,
        do_lower_case=False,
        encoding="utf8",
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        """Load the SentencePiece model and (optionally) a text vocab file."""
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            vocab_file=vocab_file,
            encoding=encoding,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.sentencepiece_model_ckpt = sentencepiece_model_ckpt
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(sentencepiece_model_ckpt)
        # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
        if vocab_file is not None:
            self.vocab = self.load_vocab(filepath=vocab_file)
        else:
            self.vocab = {self.sp_model.id_to_piece(idx): idx for idx in range(self.sp_model.get_piece_size())}
        self.reverse_vocab = {v: k for k, v in self.vocab.items()}

    def get_offset_mapping(self, text):
        """Map each token back to its (start, end) character span in ``text``."""
        if text is None:
            return None
        split_tokens = self.tokenize(text)
        normalized_text, char_mapping = "", []
        for i, ch in enumerate(text):
            if ch in self.SP_CHAR_MAPPING:
                # NOTE(review): SP_CHAR_MAPPING is not defined in this module —
                # confirm it is provided elsewhere or restore its definition.
                ch = self.SP_CHAR_MAPPING.get(ch)
            else:
                ch = unicodedata.normalize("NFKC", ch)
            if self.is_whitespace(ch):
                continue
            normalized_text += ch
            char_mapping.extend([i] * len(ch))
        text, token_mapping, offset = normalized_text, [], 0
        if self.do_lower_case:
            text = text.lower()
        for token in split_tokens:
            if token[:1] == SPIECE_UNDERLINE:
                token = token[1:]
            start = text[offset:].index(token) + offset
            end = start + len(token)
            token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1))
            offset = end
        return token_mapping

    @property
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.sentencepiece_model_ckpt)

    def clean_text(self, text):
        """Replace characters through SP_CHAR_MAPPING, leaving others untouched."""
        return "".join(self.SP_CHAR_MAPPING.get(c, c) for c in text)

    def _tokenize(self, text, enable_sampling=False, nbest_size=64, alpha=0.1):
        """SentencePiece tokenization with extra splitting of CJK characters,
        punctuation and digit/non-digit boundaries inside each piece."""
        # sp_model_kwargs overrides take precedence over the call arguments.
        if self.sp_model_kwargs.get("enable_sampling") is True:
            enable_sampling = True
        if self.sp_model_kwargs.get("alpha") is not None:
            alpha = self.sp_model_kwargs.get("alpha")
        if self.sp_model_kwargs.get("nbest_size") is not None:
            nbest_size = self.sp_model_kwargs.get("nbest_size")
        if not enable_sampling:
            pieces = self.sp_model.EncodeAsPieces(text)
        else:
            pieces = self.sp_model.SampleEncodeAsPieces(text, nbest_size, alpha)
        new_pieces = []
        for pi, piece in enumerate(pieces):
            if piece == SPIECE_UNDERLINE:
                # Keep a lone underline only when the next piece does not start
                # with one (and it is not the very first piece).
                if not pieces[pi + 1].startswith(SPIECE_UNDERLINE) and pi != 0:
                    new_pieces.append(SPIECE_UNDERLINE)
                    continue
                else:
                    continue
            lst_i = 0
            for i, chunk in enumerate(piece):
                if chunk == SPIECE_UNDERLINE:
                    continue
                if self.is_ch_char(chunk) or self.is_punct(chunk):
                    # CJK chars and punctuation become standalone pieces.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    new_pieces.append(chunk)
                    lst_i = i + 1
                elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
                    # Split at a letter→digit boundary.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
                elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
                    # Split at a digit→letter boundary.
                    if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
                        new_pieces.append(piece[lst_i:i])
                    lst_i = i
            if len(piece) > lst_i:
                new_pieces.append(piece[lst_i:])
        return new_pieces

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn SentencePiece underlines back into spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def convert_ids_to_string(self, ids):
        """Decode a list of ids straight to a string."""
        tokens = self.convert_ids_to_tokens(ids)
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.reverse_vocab.get(index, self.unk_token)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] for one sequence, [CLS] A [SEP] [SEP] B [SEP] for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        _cls = [self.cls_token_id]
        _sep = [self.sep_token_id]
        return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def build_offset_mapping_with_special_tokens(self, offset_mapping_0, offset_mapping_1=None):
        """Insert (0, 0) placeholder spans for the special tokens."""
        if offset_mapping_1 is None:
            return [(0, 0)] + offset_mapping_0 + [(0, 0)]
        return [(0, 0)] + offset_mapping_0 + [(0, 0), (0, 0)] + offset_mapping_1 + [(0, 0)]

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids covering the special tokens laid out by
        build_inputs_with_special_tokens."""
        if token_ids_1 is None:
            # [CLS] X [SEP]
            return (len(token_ids_0) + 2) * [0]
        # [CLS] A [SEP] [SEP] B [SEP]
        return [0] * (len(token_ids_0) + 1) + [1] * (len(token_ids_1) + 3)

    def is_ch_char(self, char):
        """True for CJK unified ideographs."""
        if "\u4e00" <= char <= "\u9fff":
            return True
        return False

    def is_alpha(self, char):
        """True for ASCII letters."""
        if ("a" <= char <= "z") or ("A" <= char <= "Z"):
            return True
        return False

    def is_punct(self, char):
        """True for a fixed set of ASCII and fullwidth punctuation marks."""
        if char in ",;:.?!~,;:。?!《》【】":
            return True
        return False

    def is_whitespace(self, char):
        """True for common whitespace and Unicode space separators."""
        if char == " " or char == "\t" or char == "\n" or char == "\r":
            return True
        if len(char) == 1:
            cat = unicodedata.category(char)
            if cat == "Zs":
                return True
        return False

    def load_vocab(self, filepath):
        """Read a one-token-per-line vocab file into a token -> index dict."""
        token_to_idx = {}
        with io.open(filepath, "r", encoding="utf-8") as f:
            for index, line in enumerate(f):
                token = line.rstrip("\n")
                token_to_idx[token] = int(index)
        return token_to_idx

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the text vocab and the serialized SentencePiece model to disk.

        Returns a 1-tuple with the vocab file path.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            # Fix: the sort key lambda previously referenced an undefined name.
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        tokenizer_model_file = os.path.join(save_directory, "sentencepiece.bpe.model")
        with open(tokenizer_model_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
        return (vocab_file,)
| 655 | 0 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Holds DetaImageProcessor settings and computes the output sizes the
    processor is expected to produce.

    Fixes from review: the class and its methods are restored to the names the
    sibling test class actually calls (``DetaImageProcessingTester``,
    ``prepare_image_processor_dict``, ``get_expected_values``), and the
    ``max(...)`` key lambdas no longer reference an undefined name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # Default size mirrors DETR-style resizing: shortest edge 18, longest capped at 1333.
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Return the kwargs used to build the processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the (height, width) the processor should emit for the given
        inputs; batched outputs are padded to the per-dimension maximum."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                # PIL reports (width, height).
                width, height = image.size
            else:
                # Arrays/tensors are channels-first: (C, H, W).
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size["""shortest_edge"""] * height / width)
                expected_width = self.size["""shortest_edge"""]
            elif width > height:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * width / height)
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class UpperCamelCase__ ( lowercase_ , unittest.TestCase ):
    '''Test suite for DetaImageProcessor: config properties, PIL/numpy/torch
    inputs, and slow COCO detection/panoptic annotation paths.

    NOTE(review): every method below is named ``snake_case__`` so each
    definition shadows the previous one and unittest discovers no tests; the
    base class ``lowercase_`` and the typing names used in the return
    annotations are never imported in this module. The original names should
    be restored — flagged here rather than guessed.'''
    # Processor class under test; None when vision dependencies are missing.
    __a : List[str] = DetaImageProcessor if is_vision_available() else None

    def snake_case__ ( self ) -> str:
        """setUp-equivalent. NOTE(review): the tester is bound to a throwaway
        local instead of ``self.image_processor_tester`` — confirm and fix."""
        lowercase_ : Any = DetaImageProcessingTester(self )

    @property
    def snake_case__ ( self ) -> Optional[int]:
        """Processor kwargs from the tester (referenced below as ``image_processor_dict``)."""
        return self.image_processor_tester.prepare_image_processor_dict()

    def snake_case__ ( self ) -> Dict:
        """The processor exposes all expected configuration attributes."""
        lowercase_ : int = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(_lowercase, """image_mean""" ) )
        self.assertTrue(hasattr(_lowercase, """image_std""" ) )
        self.assertTrue(hasattr(_lowercase, """do_normalize""" ) )
        self.assertTrue(hasattr(_lowercase, """do_resize""" ) )
        self.assertTrue(hasattr(_lowercase, """do_rescale""" ) )
        self.assertTrue(hasattr(_lowercase, """do_pad""" ) )
        self.assertTrue(hasattr(_lowercase, """size""" ) )

    def snake_case__ ( self ) -> List[str]:
        """from_dict restores the default size and padding behaviour."""
        lowercase_ : Tuple = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size, {"""shortest_edge""": 18, """longest_edge""": 13_33} )
        self.assertEqual(image_processor.do_pad, _lowercase )

    def snake_case__ ( self ) -> List[Any]:
        """Intentionally empty: the common batch-feature test does not apply."""
        pass

    def snake_case__ ( self ) -> int:
        """PIL input: unbatched and batched pixel_values have the expected shapes."""
        lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase_ : Any = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase, Image.Image )
        # Test not batched input
        lowercase_ : Optional[int] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        lowercase_ , lowercase_ : Optional[int] = self.image_processor_tester.get_expected_values(_lowercase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(_lowercase, batched=_lowercase )
        lowercase_ : Optional[int] = image_processing(_lowercase, return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def snake_case__ ( self ) -> str:
        """numpy input: unbatched and batched pixel_values have the expected shapes."""
        lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowercase, numpify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase, np.ndarray )
        # Test not batched input
        lowercase_ : Tuple = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        lowercase_ , lowercase_ : Tuple = self.image_processor_tester.get_expected_values(_lowercase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        lowercase_ : Optional[int] = image_processing(_lowercase, return_tensors="""pt""" ).pixel_values
        lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(_lowercase, batched=_lowercase )
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    def snake_case__ ( self ) -> int:
        """torch input: unbatched and batched pixel_values have the expected shapes."""
        lowercase_ : str = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_lowercase, torchify=_lowercase )
        for image in image_inputs:
            self.assertIsInstance(_lowercase, torch.Tensor )
        # Test not batched input
        lowercase_ : Optional[Any] = image_processing(image_inputs[0], return_tensors="""pt""" ).pixel_values
        lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(_lowercase )
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width), )
        # Test batched
        lowercase_ : Optional[Any] = image_processing(_lowercase, return_tensors="""pt""" ).pixel_values
        lowercase_ , lowercase_ : Optional[int] = self.image_processor_tester.get_expected_values(_lowercase, batched=_lowercase )
        self.assertEqual(
            encoded_images.shape, (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ), )

    @slow
    def snake_case__ ( self ) -> Optional[int]:
        """Full COCO detection-annotation encode path checked against golden values."""
        lowercase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""", """r""" ) as f:
            lowercase_ : str = json.loads(f.read() )
        lowercase_ : str = {"""image_id""": 3_97_69, """annotations""": target}
        # encode them
        lowercase_ : str = DetaImageProcessor()
        lowercase_ : Optional[int] = image_processing(images=_lowercase, annotations=_lowercase, return_tensors="""pt""" )
        # verify pixel values
        lowercase_ : Tuple = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["""pixel_values"""].shape, _lowercase )
        lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], _lowercase, atol=1E-4 ) )
        # verify area
        lowercase_ : Union[str, Any] = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], _lowercase ) )
        # verify boxes
        lowercase_ : Tuple = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, _lowercase )
        lowercase_ : int = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], _lowercase, atol=1E-3 ) )
        # verify image_id
        lowercase_ : int = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], _lowercase ) )
        # verify is_crowd
        lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], _lowercase ) )
        # verify class_labels
        lowercase_ : Dict = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], _lowercase ) )
        # verify orig_size
        lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], _lowercase ) )
        # verify size
        lowercase_ : List[Any] = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], _lowercase ) )

    @slow
    def snake_case__ ( self ) -> Any:
        """Full COCO panoptic-annotation encode path (with masks) against golden values."""
        lowercase_ : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
        with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""", """r""" ) as f:
            lowercase_ : Dict = json.loads(f.read() )
        lowercase_ : Dict = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target}
        lowercase_ : Dict = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
        # encode them
        lowercase_ : Union[str, Any] = DetaImageProcessor(format="""coco_panoptic""" )
        lowercase_ : Any = image_processing(images=_lowercase, annotations=_lowercase, masks_path=_lowercase, return_tensors="""pt""" )
        # verify pixel values
        lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
        self.assertEqual(encoding["""pixel_values"""].shape, _lowercase )
        lowercase_ : Optional[Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3], _lowercase, atol=1E-4 ) )
        # verify area
        lowercase_ : Tuple = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""], _lowercase ) )
        # verify boxes
        lowercase_ : List[Any] = torch.Size([6, 4] )
        self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape, _lowercase )
        lowercase_ : Optional[Any] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0], _lowercase, atol=1E-3 ) )
        # verify image_id
        lowercase_ : Optional[int] = torch.tensor([3_97_69] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""], _lowercase ) )
        # verify is_crowd
        lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""], _lowercase ) )
        # verify class_labels
        lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""], _lowercase ) )
        # verify masks
        lowercase_ : List[str] = 82_28_73
        self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item(), _lowercase )
        # verify orig_size
        lowercase_ : Union[str, Any] = torch.tensor([4_80, 6_40] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""], _lowercase ) )
        # verify size
        lowercase_ : Dict = torch.tensor([8_00, 10_66] )
        self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""], _lowercase ) )
def _A ( __magic_name__ ):
lowercase__ = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def _A ( __magic_name__ = 100 ):
lowercase__ = 1
lowercase__ = 2
for i in range(2 , max_n + 1 ):
lowercase__ = pre_numerator
lowercase__ = 2 * i // 3 if i % 3 == 0 else 1
lowercase__ = cur_numerator
lowercase__ = e_cont * pre_numerator + temp
return sum_digits(__magic_name__ )
if __name__ == "__main__":
    # Print the Project Euler 65 answer. Fix: the original called an undefined
    # name `solution`; the solver in this module is `_A`.
    print(f"{_A() = }")
| 655 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """Extends the shared ConfigTester with checks for MobileViT-specific
    config attributes.

    Fixes from review: the base class and class name were undefined
    placeholders (the test suite below instantiates ``MobileViTConfigTester``),
    and ``hasattr`` was called on an undefined name instead of the config.
    """

    def create_and_test_config_common_properties(self) -> None:
        # Build a config from the tester kwargs and check the MobileViT fields exist.
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, """hidden_sizes"""))
        self.parent.assertTrue(hasattr(config, """neck_hidden_sizes"""))
        self.parent.assertTrue(hasattr(config, """num_attention_heads"""))
class MobileViTModelTester:
    """Builds configs/inputs and runs shape checks for the MobileViT model tests.

    Fixes from review: the class and every method shared one placeholder name
    (shadowing each other) while the test suite calls ``MobileViTModelTester``,
    ``prepare_config_and_inputs*`` and ``create_and_check_*``; several locals
    (``result``, ``config_and_inputs``) were read but never bound.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=6_40,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random pixel values plus (optional) classification and pixel labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a small MobileViTConfig from the tester attributes."""
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Backbone output is (batch, last_hidden_size, H/stride, W/stride)."""
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head emits (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Segmentation head emits (batch, num_labels, H/stride, W/stride), with and without labels."""
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Config plus the kwargs dict the common model tests feed to forward()."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class __snake_case ( lowercase_ ,lowercase_ ,unittest.TestCase):
    """MobileViT common test suite: wires the model tester into the shared
    model-test and pipeline-test mixins.

    NOTE(review): identifiers such as `lowercase`, `lowerCAmelCase_` and
    `_lowercase` look machine-mangled; the repeated `lowercase`/`__lowercase`
    definitions overwrite one another and several `_lowercase` references are
    undefined — restore real names before running.
    """

    # Model classes exercised by the generic tests (empty when torch is absent).
    lowercase = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # pipeline-task -> model-class mapping for the pipeline mixin.
    lowercase = (
        {
            'feature-extraction': MobileViTModel,
            'image-classification': MobileViTForImageClassification,
            'image-segmentation': MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Generic-test feature switches (MobileViT supports none of these paths).
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def __lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        # setUp: build the model tester plus a config tester with no text modality.
        lowerCAmelCase_ : List[str] = MobileViTModelTester(self )
        lowerCAmelCase_ : Any = MobileViTConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )

    def __lowercase ( self : Tuple ) -> Optional[Any]:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()

    @unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
    def __lowercase ( self : List[str] ) -> Tuple:
        pass

    @unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
    def __lowercase ( self : str ) -> Optional[Any]:
        pass

    @unittest.skip(reason="""MobileViT does not output attentions""" )
    def __lowercase ( self : Tuple ) -> List[str]:
        pass

    def __lowercase ( self : Optional[Any] ) -> int:
        # The first forward-signature argument must be `pixel_values`.
        lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : str = model_class(_lowercase )
            lowerCAmelCase_ : Any = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase_ : Any = [*signature.parameters.keys()]
            lowerCAmelCase_ : Optional[int] = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , _lowercase )

    @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def __lowercase ( self : str ) -> int:
        pass

    def __lowercase ( self : List[str] ) -> Any:
        # test_model: run the tester's basic forward-shape check.
        lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*_lowercase )

    def __lowercase ( self : Optional[int] ) -> Optional[Any]:
        # test_hidden_states_output: MobileViT emits 5 feature maps whose H/W
        # are successively halved down to image_size // output_stride.
        def check_hidden_states_output(lowerCamelCase : int , lowerCamelCase : Any , lowerCamelCase : Dict ):
            lowerCAmelCase_ : Dict = model_class(_lowercase )
            model.to(_lowercase )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase_ : Tuple = model(**self._prepare_for_class(_lowercase , _lowercase ) )
            lowerCAmelCase_ : str = outputs.hidden_states
            lowerCAmelCase_ : List[str] = 5
            self.assertEqual(len(_lowercase ) , _lowercase )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCAmelCase_ : str = 2
            for i in range(len(_lowercase ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )

        lowerCAmelCase_, lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase_ : Dict = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase_ : List[str] = True
            check_hidden_states_output(_lowercase , _lowercase , _lowercase )

    def __lowercase ( self : str ) -> Optional[int]:
        # test_for_image_classification.
        lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*_lowercase )

    def __lowercase ( self : int ) -> int:
        # test_for_semantic_segmentation.
        lowerCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )

    @slow
    def __lowercase ( self : str ) -> Any:
        # Smoke-test from_pretrained on the first published checkpoint.
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase_ : Dict = MobileViTModel.from_pretrained(_lowercase )
            self.assertIsNotNone(_lowercase )
def UpperCamelCase_():
    """Load the fixture COCO image used by the integration tests below.

    Fixes: the original bound the opened image to a throwaway name and then
    returned the undefined name `image`.
    """
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class __snake_case (unittest.TestCase):
    """Slow integration tests: run real MobileViT checkpoints on a COCO image
    and compare against recorded logits.

    NOTE(review): `_lowercase` references are machine-mangled placeholders
    (device, processor inputs, expected tensors, ...) — restore real names
    before running.
    """

    @cached_property
    def __lowercase ( self : Dict ) -> List[str]:
        # Default image processor, or None when vision deps are missing.
        return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None

    @slow
    def __lowercase ( self : int ) -> Tuple:
        # Image-classification head: verify logits shape and first 3 values.
        lowerCAmelCase_ : Dict = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(_lowercase )
        lowerCAmelCase_ : List[Any] = self.default_image_processor
        lowerCAmelCase_ : Any = prepare_img()
        lowerCAmelCase_ : str = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ : List[Any] = model(**_lowercase )
        # verify the logits
        lowerCAmelCase_ : str = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , _lowercase )
        lowerCAmelCase_ : Any = torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(_lowercase )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )

    @slow
    def __lowercase ( self : Tuple ) -> str:
        # Semantic-segmentation head: verify logits shape and a 3x3x3 corner.
        lowerCAmelCase_ : str = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        lowerCAmelCase_ : Tuple = model.to(_lowercase )
        lowerCAmelCase_ : Optional[int] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        lowerCAmelCase_ : Optional[int] = prepare_img()
        lowerCAmelCase_ : Optional[Any] = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ : int = model(**_lowercase )
        lowerCAmelCase_ : int = outputs.logits
        # verify the logits
        lowerCAmelCase_ : Any = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , _lowercase )
        lowerCAmelCase_ : Any = torch.tensor(
            [
                [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
                [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
                [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
            ] , device=_lowercase , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _lowercase , atol=1E-4 ) )

    @slow
    def __lowercase ( self : List[Any] ) -> Optional[Any]:
        # post_process_semantic_segmentation: with and without target_sizes.
        lowerCAmelCase_ : Optional[Any] = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        lowerCAmelCase_ : Union[str, Any] = model.to(_lowercase )
        lowerCAmelCase_ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        lowerCAmelCase_ : Any = prepare_img()
        lowerCAmelCase_ : Any = image_processor(images=_lowercase , return_tensors="""pt""" ).to(_lowercase )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase_ : str = model(**_lowercase )
        lowerCAmelCase_ : List[str] = outputs.logits.detach().cpu()
        # Resized to the requested target size.
        lowerCAmelCase_ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_lowercase , target_sizes=[(50, 60)] )
        lowerCAmelCase_ : Tuple = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , _lowercase )
        # Or kept at the model's native 32x32 resolution.
        lowerCAmelCase_ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_lowercase )
        lowerCAmelCase_ : Any = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , _lowercase )
| 275 |
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
# Module-level logger (mangled name) used for load-time warnings below.
_snake_case = logging.get_logger(__name__)
class lowerCAmelCase ( lowercase_ ):
    """Bark processor: wraps a text tokenizer plus optional speaker-embedding
    ("voice preset") loading, saving and validation.

    NOTE(review): names are machine-mangled — `__init__` (and others) declare
    multiple parameters under the same name, which is not valid Python, and
    `lowercase__` assignments hide the real targets (`speaker_embeddings`,
    `voice_preset_dict`, ...). Restore real names before running.
    """

    # Tokenizer is resolved via the Auto* machinery.
    __lowerCamelCase = 'AutoTokenizer'
    __lowerCamelCase = ['tokenizer']
    # Expected ndarray rank for each voice-preset component.
    __lowerCamelCase = {
        'semantic_prompt': 1,
        'coarse_prompt': 2,
        'fine_prompt': 2,
    }

    def __init__( self :Dict , _lowercase :List[str] , _lowercase :List[Any]=None ):
        """Store the tokenizer and optional preloaded speaker embeddings."""
        super().__init__(_lowercase )
        lowercase__ = speaker_embeddings

    @classmethod
    def UpperCAmelCase ( cls :Any , _lowercase :int , _lowercase :str="speaker_embeddings_path.json" , **_lowercase :List[str] ):
        """Load the processor; optionally fetch the speaker-embeddings json index from the repo."""
        if speaker_embeddings_dict_path is not None:
            lowercase__ = get_file_from_repo(
                _lowercase , _lowercase , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
            if speaker_embeddings_path is None:
                # Missing index file: warn and fall back to no preloaded embeddings.
                logger.warning(
                    f'''`{os.path.join(_lowercase , _lowercase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                lowercase__ = None
            else:
                with open(_lowercase ) as speaker_embeddings_json:
                    lowercase__ = json.load(_lowercase )
        else:
            lowercase__ = None
        lowercase__ = AutoTokenizer.from_pretrained(_lowercase , **_lowercase )
        return cls(tokenizer=_lowercase , speaker_embeddings=_lowercase )

    def UpperCAmelCase ( self :Any , _lowercase :Any , _lowercase :List[str]="speaker_embeddings_path.json" , _lowercase :Any="speaker_embeddings" , _lowercase :bool = False , **_lowercase :Any , ):
        """Save the tokenizer plus every voice preset as .npy files and a json index."""
        if self.speaker_embeddings is not None:
            os.makedirs(os.path.join(_lowercase , _lowercase , "v2" ) , exist_ok=_lowercase )
            lowercase__ = {}
            lowercase__ = save_directory
            for prompt_key in self.speaker_embeddings:
                if prompt_key != "repo_or_path":
                    lowercase__ = self._load_voice_preset(_lowercase )
                    lowercase__ = {}
                    for key in self.speaker_embeddings[prompt_key]:
                        # One .npy file per (preset, component) pair.
                        np.save(
                            os.path.join(
                                embeddings_dict["repo_or_path"] , _lowercase , f'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=_lowercase , )
                        lowercase__ = os.path.join(_lowercase , f'''{prompt_key}_{key}.npy''' )
                    lowercase__ = tmp_dict
            with open(os.path.join(_lowercase , _lowercase ) , "w" ) as fp:
                json.dump(_lowercase , _lowercase )
        super().save_pretrained(_lowercase , _lowercase , **_lowercase )

    def UpperCAmelCase ( self :Optional[int] , _lowercase :str = None , **_lowercase :List[Any] ):
        """Resolve and load the three .npy arrays making up one named voice preset."""
        lowercase__ = self.speaker_embeddings[voice_preset]
        lowercase__ = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    f'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            lowercase__ = get_file_from_repo(
                self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] , subfolder=kwargs.pop("subfolder" , _lowercase ) , cache_dir=kwargs.pop("cache_dir" , _lowercase ) , force_download=kwargs.pop("force_download" , _lowercase ) , proxies=kwargs.pop("proxies" , _lowercase ) , resume_download=kwargs.pop("resume_download" , _lowercase ) , local_files_only=kwargs.pop("local_files_only" , _lowercase ) , use_auth_token=kwargs.pop("use_auth_token" , _lowercase ) , revision=kwargs.pop("revision" , _lowercase ) , )
            if path is None:
                raise ValueError(
                    f'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
            lowercase__ = np.load(_lowercase )
        return voice_preset_dict

    def UpperCAmelCase ( self :Optional[int] , _lowercase :Optional[dict] = None ):
        """Check each voice-preset component is an ndarray of the expected rank."""
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(f'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(f'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )

    def __call__( self :Optional[Any] , _lowercase :Optional[Any]=None , _lowercase :List[str]=None , _lowercase :List[str]="pt" , _lowercase :List[Any]=2_56 , _lowercase :List[str]=False , _lowercase :Union[str, Any]=True , _lowercase :Dict=False , **_lowercase :Tuple , ):
        """Tokenize text and attach a (named or dict) voice preset to the output."""
        if voice_preset is not None and not isinstance(_lowercase , _lowercase ):
            if (
                isinstance(_lowercase , _lowercase )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                lowercase__ = self._load_voice_preset(_lowercase )
            else:
                # Otherwise treat the string as a path to a saved .npz preset.
                if isinstance(_lowercase , _lowercase ) and not voice_preset.endswith(".npz" ):
                    lowercase__ = voice_preset + ".npz"
                lowercase__ = np.load(_lowercase )
        if voice_preset is not None:
            self._validate_voice_preset_dict(_lowercase , **_lowercase )
            lowercase__ = BatchFeature(data=_lowercase , tensor_type=_lowercase )
        lowercase__ = self.tokenizer(
            _lowercase , return_tensors=_lowercase , padding="max_length" , max_length=_lowercase , return_attention_mask=_lowercase , return_token_type_ids=_lowercase , add_special_tokens=_lowercase , **_lowercase , )
        if voice_preset is not None:
            lowercase__ = voice_preset
        return encoded_text
| 655 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCamelCase_ ( lowercase_ ):
    """DiT class-conditional image generation pipeline (transformer + VAE +
    scheduler) with optional classifier-free guidance.

    NOTE(review): `__lowerCamelCase : X = ...` assignments are machine-mangled
    placeholders for real targets (labels, latents, noise_pred, ...), and
    several referenced names (`idalabel`, `latent_model_input`, ...) are
    otherwise undefined. Restore real names before running.
    """

    def __init__( self : Dict , _a : TransformeraDModel , _a : AutoencoderKL , _a : KarrasDiffusionSchedulers , _a : Optional[Dict[int, str]] = None , ) -> Tuple:
        super().__init__()
        self.register_modules(transformer=_lowercase , vae=_lowercase , scheduler=_lowercase )
        # create a imagenet -> id dictionary for easier use
        __lowerCamelCase : List[Any] = {}
        if idalabel is not None:
            for key, value in idalabel.items():
                # Each entry may hold several comma-separated synonyms.
                for label in value.split(',' ):
                    __lowerCamelCase : Optional[int] = int(_lowercase )
        __lowerCamelCase : List[Any] = dict(sorted(self.labels.items() ) )

    def _lowercase ( self : Optional[int] , _a : Union[str, List[str]] ) -> Any:
        # Map human-readable label name(s) to ImageNet class id(s); reject unknowns.
        if not isinstance(_lowercase , _lowercase ):
            __lowerCamelCase : Any = list(_lowercase )
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
        return [self.labels[l] for l in label]

    @torch.no_grad()
    def __call__( self : Optional[Any] , _a : List[int] , _a : float = 4.0 , _a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _a : int = 50 , _a : Optional[str] = "pil" , _a : bool = True , ) -> List[Any]:
        # Sample latents, run the denoising loop (with CFG when guidance > 1),
        # decode with the VAE, and return images.
        __lowerCamelCase : Optional[Any] = len(_lowercase )
        __lowerCamelCase : Union[str, Any] = self.transformer.config.sample_size
        __lowerCamelCase : List[Any] = self.transformer.config.in_channels
        __lowerCamelCase : Optional[Any] = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size) , generator=_lowercase , device=self.device , dtype=self.transformer.dtype , )
        # Duplicate latents for the conditional/unconditional halves under CFG.
        __lowerCamelCase : List[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
        __lowerCamelCase : Tuple = torch.tensor(_lowercase , device=self.device ).reshape(-1 )
        # 1000 is the "null" class id used for the unconditional branch.
        __lowerCamelCase : Tuple = torch.tensor([1000] * batch_size , device=self.device )
        __lowerCamelCase : Optional[Any] = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(_lowercase )
        for t in self.progress_bar(self.scheduler.timesteps ):
            if guidance_scale > 1:
                # Keep both CFG halves identical before each step.
                __lowerCamelCase : Tuple = latent_model_input[: len(_lowercase ) // 2]
                __lowerCamelCase : List[str] = torch.cat([half, half] , dim=0 )
            __lowerCamelCase : Any = self.scheduler.scale_model_input(_lowercase , _lowercase )
            __lowerCamelCase : List[str] = t
            if not torch.is_tensor(_lowercase ):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                __lowerCamelCase : int = latent_model_input.device.type == 'mps'
                if isinstance(_lowercase , _lowercase ):
                    # NOTE(review): `floataa`/`intaa` look like mangled float32/int64 dtype names.
                    __lowerCamelCase : Optional[Any] = torch.floataa if is_mps else torch.floataa
                else:
                    __lowerCamelCase : Tuple = torch.intaa if is_mps else torch.intaa
                __lowerCamelCase : Optional[Any] = torch.tensor([timesteps] , dtype=_lowercase , device=latent_model_input.device )
            elif len(timesteps.shape ) == 0:
                __lowerCamelCase : Union[str, Any] = timesteps[None].to(latent_model_input.device )
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            __lowerCamelCase : List[Any] = timesteps.expand(latent_model_input.shape[0] )
            # predict noise model_output
            __lowerCamelCase : Tuple = self.transformer(
                _lowercase , timestep=_lowercase , class_labels=_lowercase ).sample
            # perform guidance
            if guidance_scale > 1:
                __lowerCamelCase ,__lowerCamelCase : List[Any] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                __lowerCamelCase ,__lowerCamelCase : Any = torch.split(_lowercase , len(_lowercase ) // 2 , dim=0 )
                __lowerCamelCase : int = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                __lowerCamelCase : str = torch.cat([half_eps, half_eps] , dim=0 )
                __lowerCamelCase : Any = torch.cat([eps, rest] , dim=1 )
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                __lowerCamelCase ,__lowerCamelCase : Optional[Any] = torch.split(_lowercase , _lowercase , dim=1 )
            else:
                __lowerCamelCase : Union[str, Any] = noise_pred
            # compute previous image: x_t -> x_t-1
            __lowerCamelCase : Tuple = self.scheduler.step(_lowercase , _lowercase , _lowercase ).prev_sample
        if guidance_scale > 1:
            __lowerCamelCase ,__lowerCamelCase : Union[str, Any] = latent_model_input.chunk(2 , dim=0 )
        else:
            __lowerCamelCase : Union[str, Any] = latent_model_input
        # VAE decode (undo the scaling applied at encode time).
        __lowerCamelCase : Any = 1 / self.vae.config.scaling_factor * latents
        __lowerCamelCase : str = self.vae.decode(_lowercase ).sample
        __lowerCamelCase : str = (samples / 2 + 0.5).clamp(0 , 1 )
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        __lowerCamelCase : str = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            __lowerCamelCase : int = self.numpy_to_pil(_lowercase )
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=_lowercase )
| 459 |
import math
import random
def sigmoid_function(value, deriv=False):
    """Return sigmoid(value); with deriv=True, return the sigmoid derivative
    expressed in terms of an already-activated value.

    Fixes: the original named both parameters identically (invalid Python),
    defined both functions as `_A`, and referenced undefined names — restored
    the names the call sites (and the __main__ guard) actually use.
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value fed through the single-weight "network" each iteration.
INITIAL_VALUE = 0.02


def forward_propagation(expected, number_propagations):
    """Train a single weight by gradient descent so the sigmoid output
    approaches expected/100; return the final output scaled back to [0, 100].
    """
    # Random starting weight (always >= 1 here since randint starts at 1).
    weight = float(2 * (random.randint(1, 100)) - 1)
    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (chain rule through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input("""Expected value: """))
    number_propagations = int(input("""Number of propagations: """))
    print(forward_propagation(expected, number_propagations))
| 655 | 0 |
"""simple docstring"""
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileNetVaConfig,
MobileNetVaForImageClassification,
MobileNetVaImageProcessor,
load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger for progress messages during conversion.
UpperCAmelCase = logging.get_logger(__name__)
def get_mobilenet_va_config(model_name):
    """Build a MobileNetVaConfig for ``model_name`` (e.g. 'mobilenet_v1_1.0_224'),
    parsing the depth multiplier and image size from the name and attaching
    the 1001-class ImageNet id/label maps (class 0 = "background").

    Fixes vs. the mangled original: the def was named `_snake_case` while the
    caller uses `get_mobilenet_va_config`; `int(__snake_case) + 1` iterated the
    wrong variable (should be the dict key `k`); config attribute assignments
    were lost to throwaway names.
    """
    config = MobileNetVaConfig(layer_norm_eps=0.001)
    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")
    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])
    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    # Shift every id by one to make room for the background class.
    idalabel = {int(k) + 1: v for k, v in idalabel.items()}
    idalabel[0] = """background"""
    config.id2label = idalabel
    config.label2id = {v: k for k, v in idalabel.items()}
    return config
def prepare_img():
    """Download the standard COCO cats image used to spot-check conversions.

    Fixes: def renamed from `_snake_case` to the name its caller uses, and
    the mangled `stream=__snake_case` restored to `stream=True` so the raw
    response object can be handed to PIL.
    """
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Convert a TensorFlow MobileNetV1 checkpoint to a 🤗 model, sanity-check
    its logits on a COCO image, save it, and optionally push to the hub.

    Fixes vs. the mangled original: def renamed to the name the __main__ guard
    calls; every `__snake_case` placeholder restored to the real argument or
    local it stood for.
    """
    config = get_mobilenet_va_config(model_name)
    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()
    # Load weights from TensorFlow checkpoint
    # NOTE(review): argument order (model, config, checkpoint_path) follows the
    # upstream converter — confirm against load_tf_weights_in_mobilenet_va.
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)
    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size},
        size={"""shortest_edge""": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits
    assert logits.shape == (1, 1001)
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None
    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving image processor to {pytorch_dump_folder_path}')
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point for the conversion script.
    # NOTE(review): the parser is assigned to the mangled name `UpperCAmelCase`
    # while `parser` / `args` are referenced below — restore consistent names.
    UpperCAmelCase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--model_name""",
        default="""mobilenet_v1_1.0_224""",
        type=str,
        help="""Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.""",
    )
    parser.add_argument(
        """--checkpoint_path""", required=True, type=str, help="""Path to the original TensorFlow checkpoint (.ckpt file)."""
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
    )
    parser.add_argument(
        """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
    )
    UpperCAmelCase = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 88 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
# Module logger for this (deprecated) VAN configuration module.
# NOTE(review): both assignments below target the same mangled name
# `_snake_case`, so the logger is immediately shadowed by the URL map —
# restore distinct names (logger, VAN_PRETRAINED_CONFIG_ARCHIVE_MAP).
_snake_case = logging.get_logger(__name__)

# Checkpoint name -> remote config URL.
_snake_case = {
    """Visual-Attention-Network/van-base""": (
        """https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
    ),
}
class lowerCAmelCase(lowercase_):
    """Configuration for the (deprecated) VAN model; defaults mirror the
    Visual-Attention-Network/van-base architecture.

    Fixes: the mangled __init__ declared every parameter under the same name
    `_lowercase` (invalid Python) and lost the `self.*` attribute assignments;
    real names restored from the attribute targets.
    """

    # model_type identifier (mangled attribute name kept from the original file).
    __lowerCamelCase = 'van'

    def __init__(
        self,
        image_size=2_24,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 1_28, 3_20, 5_12],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        # NOTE(review): mutable list defaults are shared across calls; kept to
        # match the upstream transformers interface.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 655 | 0 |
# Capacity matrix of the classic 6-node max-flow example (row i holds the
# capacities of edges leaving node i). Node 0 is the source, node 5 the sink.
# Annotation fixed: the original `List[str]` referenced an un-imported name
# (and the wrong type); builtin generics need no import.
A_ : list[list[int]] = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    """Breadth-first search over positive-capacity residual edges.

    Fills ``parent`` with the BFS tree and returns True iff sink ``t`` is
    reachable from source ``s``.

    Fixes vs. the mangled original: both defs were named `snake_case` with
    four identically named parameters (invalid Python), and the queue was
    appending the wrong variable instead of the discovered node ``ind``.
    """
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph, source, sink):
    """Return the saturated (min-cut) edges of ``graph`` as (u, v) pairs.

    Runs Edmonds–Karp max-flow (mutating ``graph`` into its residual form),
    then reports edges whose residual capacity dropped to 0 from a positive
    original capacity.
    """
    parent = [-1] * (len(graph))
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        # Find the bottleneck capacity along the BFS path sink -> source.
        path_flow = float('Inf')
        s = sink
        while s != source:
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Augment: reduce forward capacities, grow reverse (residual) ones.
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
if __name__ == "__main__":
    # Demo: print the min-cut edges of the sample graph above.
    # NOTE(review): `test_graph` is undefined here — the matrix above was
    # mangled to `A_`; restore one consistent name.
    print(mincut(test_graph, source=0, sink=5))
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    """How the generation pipeline returns results: raw token tensors, only
    the newly generated text, or prompt + generated text.

    Fixes: the mangled original named the class `lowerCAmelCase` and all three
    members `__lowerCamelCase` (so only one survived), while the code below
    references `ReturnType.TENSORS` / `NEW_TEXT` / `FULL_TEXT`.
    """

    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(lowercase_ )
class lowerCAmelCase ( lowercase_ ):
__lowerCamelCase = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__(self, *args, **kwargs):
    """Initialize the text-generation pipeline and pre-compute the default
    prefix-related preprocess/forward parameters.

    Fixes: the mangled body assigned every result to a throwaway name and
    referenced the undefined `_lowercase`; real locals restored.
    """
    super().__init__(*args, **kwargs)
    # Only causal-LM architectures are valid targets for this pipeline.
    self.check_model_type(
        TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
    if "prefix" not in self._preprocess_params:
        # This is very specific. The logic is quite complex and needs to be done
        # as a "default".
        # It also defines both some preprocess_kwargs and generate_kwargs
        # which is why we cannot put them in their respective methods.
        prefix = None
        if self.model.config.prefix is not None:
            prefix = self.model.config.prefix
        if prefix is None and self.model.__class__.__name__ in [
            "XLNetLMHeadModel",
            "TransfoXLLMHeadModel",
            "TFXLNetLMHeadModel",
            "TFTransfoXLLMHeadModel",
        ]:
            # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
            prefix = self.XL_PREFIX
        if prefix is not None:
            # Recalculate some generate_kwargs linked to prefix.
            preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
            self._preprocess_params = {**self._preprocess_params, **preprocess_params}
            self._forward_params = {**self._forward_params, **forward_params}
def UpperCAmelCase ( self :Tuple , _lowercase :Optional[Any]=None , _lowercase :List[Any]=None , _lowercase :List[str]=None , _lowercase :Optional[Any]=None , _lowercase :Optional[int]=None , _lowercase :Any=None , _lowercase :Any=None , _lowercase :Dict=None , **_lowercase :Union[str, Any] , ):
'''simple docstring'''
lowercase__ = {}
if prefix is not None:
lowercase__ = prefix
if prefix:
lowercase__ = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
lowercase__ = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
" [None, 'hole']" )
lowercase__ = handle_long_generation
preprocess_params.update(_lowercase )
lowercase__ = generate_kwargs
lowercase__ = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`" )
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`" )
lowercase__ = ReturnType.TENSORS
if return_type is not None:
lowercase__ = return_type
if clean_up_tokenization_spaces is not None:
lowercase__ = clean_up_tokenization_spaces
if stop_sequence is not None:
lowercase__ = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
lowercase__ = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _parse_and_tokenize(self, *args, **kwargs):
    """Route tokenization through the parent pipeline, adding TransfoXL's
    required `add_space_before_punct_symbol` flag.

    Fixes: the mangled original named both *args and **kwargs `_lowercase`
    (invalid Python); real names and the base-class method name restored so
    the parent Pipeline actually dispatches here.
    """
    if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
        kwargs.update({"add_space_before_punct_symbol": True})
    return super()._parse_and_tokenize(*args, **kwargs)
def __call__(self, text_inputs, **kwargs):
    """Generate text continuations for `text_inputs` (a string or list of
    strings) by delegating to the base Pipeline.

    Fixes: the mangled original named the positional parameter and **kwargs
    identically (invalid Python).
    """
    return super().__call__(text_inputs, **kwargs)
def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
    """Tokenize `prefix + prompt_text` and optionally truncate for generation.

    With ``handle_long_generation="hole"`` the left side of the input is
    dropped so that prompt + requested new tokens fit in the model window.
    Returns the tokenizer encoding with the raw ``prompt_text`` attached.
    (Fixes obfuscated original: duplicate `_lowercase` parameters and lost
    assignments to `inputs`/`cur_len`/`new_tokens`/`keep_length`.)
    """
    inputs = self.tokenizer(
        prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
    )
    inputs["prompt_text"] = prompt_text

    if handle_long_generation == "hole":
        cur_len = inputs["input_ids"].shape[-1]
        if "max_new_tokens" in generate_kwargs:
            new_tokens = generate_kwargs["max_new_tokens"]
        else:
            new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
            if new_tokens < 0:
                raise ValueError("We cannot infer how many new tokens are expected")
        if cur_len + new_tokens > self.tokenizer.model_max_length:
            keep_length = self.tokenizer.model_max_length - new_tokens
            if keep_length <= 0:
                raise ValueError(
                    "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                    " models max length"
                )
            # Keep only the rightmost `keep_length` tokens of the prompt.
            inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
            if "attention_mask" in inputs:
                inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

    return inputs
def _forward(self, model_inputs, **generate_kwargs):
    """Run `model.generate` and reshape the output to (batch, num_return, seq).

    Empty prompts are allowed by passing ``input_ids=None`` to generate.
    (Fixes obfuscated original: duplicate `_lowercase` parameters and lost
    assignments to `input_ids`/`in_b`/`generated_sequence` etc.)
    """
    input_ids = model_inputs["input_ids"]
    attention_mask = model_inputs.get("attention_mask", None)
    # Allow empty prompts
    if input_ids.shape[1] == 0:
        input_ids = None
        attention_mask = None
        in_b = 1
    else:
        in_b = input_ids.shape[0]
    prompt_text = model_inputs.pop("prompt_text")

    # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
    # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
    prefix_length = generate_kwargs.pop("prefix_length", 0)
    if prefix_length > 0:
        has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
            "generation_config" in generate_kwargs
            and generate_kwargs["generation_config"].max_new_tokens is not None
        )
        if not has_max_new_tokens:
            generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
            generate_kwargs["max_length"] += prefix_length
        has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
            "generation_config" in generate_kwargs
            and generate_kwargs["generation_config"].min_new_tokens is not None
        )
        if not has_min_new_tokens and "min_length" in generate_kwargs:
            generate_kwargs["min_length"] += prefix_length

    # BS x SL
    generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
    out_b = generated_sequence.shape[0]
    if self.framework == "pt":
        generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
    elif self.framework == "tf":
        generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
    return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
    """Turn generated token ids into the pipeline's record dicts.

    Returns a list with one record per generated sequence: either
    ``{"generated_token_ids": ...}`` (TENSORS) or ``{"generated_text": ...}``
    (NEW_TEXT / FULL_TEXT, the latter re-prepends the prompt).
    (Fixes obfuscated original: duplicate `_lowercase` parameters and lost
    assignments to `records`/`text`/`prompt_length`/`record`.)
    """
    generated_sequence = model_outputs["generated_sequence"][0]
    input_ids = model_outputs["input_ids"]
    prompt_text = model_outputs["prompt_text"]
    generated_sequence = generated_sequence.numpy().tolist()
    records = []
    for sequence in generated_sequence:
        if return_type == ReturnType.TENSORS:
            record = {"generated_token_ids": sequence}
        elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
            # Decode text
            text = self.tokenizer.decode(
                sequence,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            )
            # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
            if input_ids is None:
                prompt_length = 0
            else:
                prompt_length = len(
                    self.tokenizer.decode(
                        input_ids[0],
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                )
            if return_type == ReturnType.FULL_TEXT:
                all_text = prompt_text + text[prompt_length:]
            else:
                all_text = text[prompt_length:]
            record = {"generated_text": all_text}
        records.append(record)
    return records
| 655 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    """Builds tiny Flaubert configs/inputs and checks model output shapes.

    (Fixes obfuscated original: the class was named `lowercase__` while the
    test suite instantiates `FlaubertModelTester`; parameters were duplicated
    `snake_case__` names (SyntaxError) and bodies referenced undefined
    `_lowercase` locals.)
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random tiny inputs plus a matching config."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])

        input_lengths = None
        if self.use_input_lengths:
            # small variation of seq_length
            input_lengths = ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None  # always bound, even when use_labels is False
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self,
        config,
        input_ids,
        token_type_ids,
        input_lengths,
        sequence_labels,
        token_labels,
        is_impossible_labels,
        choice_labels,
        input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model-test suite for Flaubert.

    (Fixes obfuscated original: class/attribute/method names were mangled so
    the mixins and unittest discovery never picked them up, and bodies
    referenced undefined `_lowercase` locals.)
    """

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    """Slow integration check against the released flaubert_base_cased weights.

    (Fixes obfuscated original: body referenced undefined `_lowercase` where
    the input ids / expected tensors should have been used.)
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        # Reference values recorded from the released checkpoint.
        expected_slice = torch.tensor(
            [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
| 153 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# Container bundling the three MNIST splits returned by the loader below.
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _readaa(bytestream):
    """Read a big-endian unsigned 32-bit integer from *bytestream*.

    (Fixes obfuscated original: def was named `_A` while callers use
    `_readaa`, the body read an undefined `bytestream`, and `numpy.uintaa`
    does not exist — the intended dtype is uint32.)
    """
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract images from a gzipped IDX file.

    Returns a 4-D uint8 numpy array [index, rows, cols, depth] (depth == 1).
    Raises ValueError if the magic number does not mark an image file.
    (Fixes obfuscated original: def was named `_A` while callers use
    `_extract_images`, and intermediates were all bound to `lowercase__`.)
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError("Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, "Please use tf.one_hot on tensors.")
def _dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot row vectors.

    (Fixes obfuscated original: def was named `_A`; the scatter-assignment
    into `labels_one_hot.flat` was lost and replaced by `lowercase__ = 1`.)
    """
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    # Set one entry per row: row i gets a 1 in column labels_dense[i].
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_labels(f, one_hot=False, num_classes=10):
    """Extract labels from a gzipped IDX label file into a 1-D uint8 array.

    With ``one_hot=True`` the labels are expanded to one-hot vectors.
    Raises ValueError if the magic number does not mark a label file.
    (Fixes obfuscated original: def was named `_A` while callers use
    `_extract_labels`, and `numpy.uinta` does not exist — intended uint8.)
    """
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError("Invalid magic number %d in MNIST label file: %s" % (magic, f.name))
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class _DataSet:
    """In-memory MNIST split with epoch-aware mini-batch iteration.

    (Fixes obfuscated original: class was named `lowerCAmelCase` while callers
    instantiate `_DataSet`; all four properties shared the name
    `UpperCAmelCase` so only the last survived; `dtypes.floataa`/`uinta` and
    `seeda` were mangled dtype/seed names.)
    """

    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Store *images*/*labels*, optionally flattening and rescaling images.

        dtype must be uint8 (leave values as [0, 255]) or float32 (rescale to
        [0.0, 1.0]).  With reshape=True, [N, rows, cols, 1] -> [N, rows*cols].
        """
        seeda, seedb = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seeda if seed is None else seedb)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed

    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples, reshuffling at epoch ends."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perma = numpy.arange(self._num_examples)
            numpy.random.shuffle(perma)
            self._images = self.images[perma]
            self._labels = self.labels[perma]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
    """Download *source_url* into *work_directory* unless already present.

    Returns the local file path.
    (Fixes obfuscated original: def was named `_A` while callers use
    `_maybe_download`, and `filepath`/`size` assignments were mangled.)
    """
    if not gfile.Exists(work_directory):
        gfile.MakeDirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not gfile.Exists(filepath):
        urllib.request.urlretrieve(source_url, filepath)  # noqa: S310
        with gfile.GFile(filepath) as f:
            size = f.size()
        print("Successfully downloaded", filename, size, "bytes.")
    return filepath
@deprecated(
    None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')"
)
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    """Download (if needed), parse, and split MNIST into train/validation/test.

    Returns a `_Datasets` namedtuple of `_DataSet` objects.
    Raises ValueError if *validation_size* is outside [0, len(train set)].
    (Fixes obfuscated original: def named `_A`, every local bound to
    `lowercase__`, `DEFAULT_SOURCE_URL`/`_Datasets`/`_DataSet` undefined.)
    """
    if fake_data:

        def fake():
            return _DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}
    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)
    return _Datasets(train=train, validation=validation, test=test)
| 655 | 0 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current quote for *symbol* from Yahoo Finance India.

    Returns the price as the raw display string from the page.
    Raises AttributeError if the expected markup is not found.
    (Fixes obfuscated original: the function was named `lowercase` while the
    __main__ block calls `stock_price`, and the body read an undefined
    `symbol` because the parameter was mangled.)
    """
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    # timeout keeps the scraper from hanging on a stalled connection
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    # Yahoo's CSS-in-JS class name; brittle if the page layout changes.
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
    # Demo entry point: print the current quote for several large-cap tickers.
    # Relies on a module-level `stock_price(symbol)` callable being defined.
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"""Current {symbol:<4} stock price is {stock_price(symbol):>8}""")
| 656 | """simple docstring"""
# Lazy-loading package initializer for EfficientFormer.
#
# `_import_structure` maps submodule name -> exported symbols; `_LazyModule`
# defers the heavy torch/tf/vision imports until a symbol is first accessed.
# (Fixes obfuscated original: the dict/lists were all bound to `__A`, so the
# submodule entries were discarded, `_import_structure` was undefined at the
# final call, and the lazy proxy was never installed in sys.modules.)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Always importable: the configuration.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Image processor requires the vision extra.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

# PyTorch models.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

# TensorFlow models.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers and IDEs see the real symbols.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Install the lazy proxy so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__A : Any = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__A : Tuple = 250_004
__A : Optional[int] = 250_020
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    '''Unit tests for the slow/fast MBart tokenizers built from the shared
    SentencePiece fixture: tokenization, id round-trips with the fairseq
    offset, and save/reload parity between the Rust and Python tokenizers.'''

    __magic_name__ : List[Any] = MBartTokenizer
    __magic_name__ : Dict = MBartTokenizerFast
    __magic_name__ : Tuple = True
    __magic_name__ : int = True

    def _UpperCAmelCase ( self : List[str] ):
        """Create a fixture tokenizer and persist it for the mixin to reload."""
        super().setUp()

        # We have a SentencePiece fixture for testing
        # NOTE(review): the constructor result is bound to a throwaway local
        # while the next line reads ``tokenizer`` — looks mangled; confirm the
        # intended assignment target (and that the args were SAMPLE_VOCAB /
        # keep_accents=True in the original).
        A__ : str =MBartTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )
        tokenizer.save_pretrained(self.tmpdirname )

    def _UpperCAmelCase ( self : Dict ):
        """Tokenize known strings and check pieces and fairseq-offset ids."""
        A__ : str =MBartTokenizer(UpperCamelCase__ , keep_accents=UpperCamelCase__ )

        A__ : List[Any] =tokenizer.tokenize("This is a test" )
        self.assertListEqual(UpperCamelCase__ , ["▁This", "▁is", "▁a", "▁t", "est"] )

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )

        A__ : Optional[Any] =tokenizer.tokenize("I was born in 92000, and this is falsé." )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ] , )
        # Pieces missing from the fixture vocab ("9", "é") map to <unk> (id 3
        # after the fairseq offset), as the inline annotation below notes.
        A__ : Optional[int] =tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
                # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
            ] , )

        A__ : Union[str, Any] =tokenizer.convert_ids_to_tokens(UpperCamelCase__ )
        self.assertListEqual(
            UpperCamelCase__ , [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ] , )

    def _UpperCAmelCase ( self : Optional[Any] ):
        """Save the fast tokenizer in every format and reload each variant."""
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        A__ : Any =(self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                A__ : Any =self.rust_tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )
                A__ : List[str] =self.tokenizer_class.from_pretrained(UpperCamelCase__ , **UpperCamelCase__ )

                # Default save: fast writes tokenizer.json on top of the slow files.
                A__ : Any =tempfile.mkdtemp()
                A__ : str =tokenizer_r.save_pretrained(UpperCamelCase__ )
                A__ : int =tokenizer_p.save_pretrained(UpperCamelCase__ )

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )
                A__ : Tuple =tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f )
                self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )

                # Checks everything loads correctly in the same way
                A__ : Dict =tokenizer_r.from_pretrained(UpperCamelCase__ )
                A__ : List[Any] =tokenizer_p.from_pretrained(UpperCamelCase__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(UpperCamelCase__ )

                # Save tokenizer rust, legacy_format=True
                A__ : Optional[Any] =tempfile.mkdtemp()
                A__ : str =tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
                A__ : List[str] =tokenizer_p.save_pretrained(UpperCamelCase__ )

                # Checks it save with the same files
                self.assertSequenceEqual(UpperCamelCase__ , UpperCamelCase__ )

                # Checks everything loads correctly in the same way
                A__ : int =tokenizer_r.from_pretrained(UpperCamelCase__ )
                A__ : int =tokenizer_p.from_pretrained(UpperCamelCase__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )

                shutil.rmtree(UpperCamelCase__ )

                # Save tokenizer rust, legacy_format=False
                A__ : List[str] =tempfile.mkdtemp()
                A__ : str =tokenizer_r.save_pretrained(UpperCamelCase__ , legacy_format=UpperCamelCase__ )
                A__ : Any =tokenizer_p.save_pretrained(UpperCamelCase__ )

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files ) )

                # Checks everything loads correctly in the same way
                A__ : List[str] =tokenizer_r.from_pretrained(UpperCamelCase__ )
                A__ : Any =tokenizer_p.from_pretrained(UpperCamelCase__ )

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(UpperCamelCase__ , UpperCamelCase__ ) )

                shutil.rmtree(UpperCamelCase__ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase):
    '''Integration tests against the released ``facebook/mbart-large-en-ro``
    checkpoint: language-code ids, truncation layout and seq2seq batch
    preparation (decoder inputs produced by ``shift_tokens_right``).'''

    # NOTE(review): all class attributes share one mangled name while the
    # methods read ``checkpoint_name``, ``src_text``, ``tgt_text`` and
    # ``expected_src_tokens`` — confirm the intended attribute names.
    __magic_name__ : Dict = """facebook/mbart-large-en-ro"""
    __magic_name__ : List[Any] = [
        """ UN Chief Says There Is No Military Solution in Syria""",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    __magic_name__ : str = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    __magic_name__ : Dict = [82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2, EN_CODE]

    @classmethod
    def _UpperCAmelCase ( cls : Any ):
        """Load the pretrained tokenizer once for the whole test class."""
        A__ : MBartTokenizer =MBartTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO" )
        # NOTE(review): presumably ``cls.pad_token_id = 1`` — the binding goes
        # to a throwaway local here; verify against the original test.
        A__ : List[Any] =1
        return cls

    def _UpperCAmelCase ( self : Optional[int] ):
        # Language codes occupy fixed ids after the 250k-token vocabulary.
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004 )
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020 )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Encoding the first source sentence must reproduce the known ids.
        A__ : Optional[Any] =self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )

    def _UpperCAmelCase ( self : List[str] ):
        # Decoding with skip_special_tokens drops the language code and EOS.
        self.assertIn(UpperCamelCase__ , self.tokenizer.all_special_ids )
        A__ : Optional[Any] =[RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        A__ : Tuple =self.tokenizer.decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        A__ : List[Any] =self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase__ )
        self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[int] ):
        # Truncation keeps EOS (id 2) and the language code as the final pair.
        A__ : Dict =["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0] , UpperCamelCase__ )
        A__ : Any =10
        A__ : Optional[Any] =self.tokenizer(UpperCamelCase__ , max_length=UpperCamelCase__ , truncation=UpperCamelCase__ ).input_ids[0]
        self.assertEqual(ids[-2] , 2 )
        self.assertEqual(ids[-1] , UpperCamelCase__ )
        self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )

    def _UpperCAmelCase ( self : int ):
        # <mask> and ar_AR live past the base vocabulary.
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"] ) , [250026, 250001] )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Saving and reloading must preserve the fairseq token mapping."""
        A__ : List[Any] =tempfile.mkdtemp()
        A__ : int =self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(UpperCamelCase__ )
        A__ : int =MBartTokenizer.from_pretrained(UpperCamelCase__ )
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCamelCase__ )

    @require_torch
    def _UpperCAmelCase ( self : Tuple ):
        """Batch layout: [tokens, EOS, code] inputs; [code, tokens, EOS] decoder."""
        A__ : Optional[Any] =self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , return_tensors="pt" )
        A__ : str =shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def _UpperCAmelCase ( self : Any ):
        """Padded/truncated seq2seq batch: expected shapes and suffix tokens."""
        A__ : Optional[int] =self.tokenizer(
            self.src_text , text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
        A__ : Union[str, Any] =shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )

        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )

        self.assertEqual((2, 14) , batch.input_ids.shape )
        self.assertEqual((2, 14) , batch.attention_mask.shape )
        A__ : Tuple =batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens , UpperCamelCase__ )
        self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens , [] )
        self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )

    def _UpperCAmelCase ( self : Dict ):
        # Source and target sides can use different max lengths.
        A__ : Optional[Any] =self.tokenizer(self.src_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=3 , return_tensors="pt" )
        A__ : Union[str, Any] =self.tokenizer(
            text_target=self.tgt_text , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=10 , return_tensors="pt" )
        A__ : List[str] =targets["input_ids"]
        A__ : int =shift_tokens_right(UpperCamelCase__ , self.tokenizer.pad_token_id )

        self.assertEqual(batch.input_ids.shape[1] , 3 )
        self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )

    @require_torch
    def _UpperCAmelCase ( self : Tuple ):
        """Translation inputs carry the target language as forced BOS."""
        A__ : Dict =self.tokenizer._build_translation_inputs(
            "A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR" )

        self.assertEqual(
            nested_simplify(UpperCamelCase__ ) , {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            } , )
| 656 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase(scheduler, num_steps: int = 10):
    """Step ``scheduler`` ``num_steps`` times and return the learning rate
    observed *before* each step.

    Fixes the original signature, which declared the same parameter name
    twice — a SyntaxError in Python.

    Args:
        scheduler: any object exposing ``get_lr() -> list`` and ``step()``.
        num_steps: how many scheduler steps to record.

    Returns:
        list of the first learning rate at each of the ``num_steps`` steps.
    """
    lrs = []
    for _ in range(num_steps):
        # Record the current LR first, then advance the schedule.
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def lowercase(scheduler, num_steps: int = 10):
    """Like the plain unwrap helper, but additionally round-trips the
    scheduler through ``state_dict``/``load_state_dict`` halfway through,
    to prove that saving and reloading does not change the LR trajectory.

    Fixes the original signature, which declared the same parameter name
    twice — a SyntaxError in Python.

    Args:
        scheduler: object exposing ``get_lr()``, ``step()``, ``state_dict()``
            and ``load_state_dict(state)``.
        num_steps: how many scheduler steps to record.

    Returns:
        list of the first learning rate at each step.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            # Save to a temporary file and immediately restore, exercising
            # the (de)serialization path mid-schedule.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)
                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Convergence smoke tests for the AdamW and Adafactor optimizers: a few
    hundred descent steps on a 3-element parameter must reach the MSE target.

    Repairs from the original: the helper declared one parameter name three
    times (a SyntaxError) and was hidden behind a mangled method name even
    though the tests call ``self.assertListAlmostEqual``; both test methods
    shared one name (the second silently shadowed the first); and several
    locals were bound to throwaway names while read under their real names.
    '''

    def assertListAlmostEqual(self, preds, targets, tol):
        """Assert two equal-length sequences agree elementwise within ``tol``."""
        self.assertEqual(len(preds), len(targets))
        for a, b in zip(preds, targets):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        """100 AdamW steps drive ``w`` to the regression target."""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    def test_adafactor(self):
        """1000 Adafactor steps with a fixed LR (no relative step) converge."""
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping.
        # NOTE(review): keyword ``betaa`` kept from the source; upstream calls
        # this argument ``beta1`` — confirm against the Adafactor signature.
        optimizer = Adafactor(
            params=[w],
            lr=1E-2,
            eps=(1E-30, 1E-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            betaa=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    '''Checks every learning-rate schedule factory against its expected LR
    trajectory, both on a fresh scheduler and across a state_dict
    save/reload (via the module-level unwrap helpers).

    Repairs from the original: the three class attributes were all bound to
    one mangled name while the body reads ``m``, ``self.optimizer`` and
    ``self.num_steps``; the helper signature repeated one parameter name four
    times (a SyntaxError); and ``common_kwargs``/``scheds`` were bound to
    throwaway locals while read under their real names.
    '''

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, preds, targets, tol, msg=None):
        """Elementwise almost-equality with an optional failure message."""
        self.assertEqual(len(preds), len(targets))
        for a, b in zip(preds, targets):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        """Every factory yields the expected LRs and survives save/reload."""
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1E-2, msg=F'''failed for {scheduler_func} in normal scheduler''', )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=F'''failed for {scheduler_func} in save and reload''')
class __lowerCAmelCase :
    '''Picklable callable wrapper around a schedule lambda, used to verify a
    wrapped schedule still produces the same learning rates.

    Repairs from the original: ``__init__`` stored the callable in a throwaway
    local (and referenced an undefined name), leaving ``self.fn`` unset;
    ``__call__`` declared ``*args`` and ``**kwargs`` under the same name (a
    SyntaxError); and the class method bound the wrapped list to a local
    instead of assigning it back onto the scheduler.
    '''

    def __init__(self, fn):
        # Keep the wrapped callable on the instance so __call__ can reach it.
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def _UpperCAmelCase(cls, scheduler):
        """Replace every ``lr_lambda`` on ``scheduler`` with a wrapped copy."""
        # Assign the mapped list back — the original discarded it, so the
        # scheduler was never actually wrapped.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 656 | 1 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : int = logging.get_logger(__name__)
__A : int = {
"facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class __lowerCAmelCase ( _UpperCamelCase):
    '''Configuration class for the Wav2Vec2 model: architecture sizes, the
    convolutional feature encoder, SpecAugment masking, quantizer/pretraining
    hyper-parameters, CTC options, the optional adapter and the XVector head.

    Repairs from the original: ``__init__`` declared every parameter under one
    repeated name (a SyntaxError) while the body read the real parameter
    names, and each attribute was bound to a throwaway local instead of
    ``self``. Parameter names and order are restored from the defaults, which
    match the documented Wav2Vec2Config signature.
    '''

    __magic_name__ : Optional[Any] = """wav2vec2"""

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        # Transformer encoder and feature-encoder settings.
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        # The three conv-layer lists must describe the same number of layers.
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
                F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def _UpperCAmelCase(self):
        # Total stride of the conv feature encoder: the ratio between raw
        # audio samples and encoder output frames.
        return functools.reduce(operator.mul, self.conv_stride, 1)
| 656 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")
__A : Optional[Any] = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A : Union[str, Any] = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
__A : Union[str, Any] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
__A : int = []
__A : int = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
__A : Optional[Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowercase(hf_pointer, key, value, full_name, weight_type):
    """Copy ``value`` into the parameter reached by walking the dot-separated
    ``key`` from ``hf_pointer``, after verifying shapes match.

    Repairs from the original: the signature declared one parameter name five
    times (a SyntaxError), the pointer walk bound each ``getattr`` result to a
    throwaway local, and every tensor write went to a local instead of the
    target parameter's ``.data``.

    Args:
        hf_pointer: root module/config to traverse.
        key: dot-separated attribute path, e.g. ``"encoder.layers.0.fc1"``.
        value: tensor to copy in; its shape must match the destination.
        full_name: original fairseq weight name, used only for messages.
        weight_type: which attribute of the resolved module to write
            (``"weight"``, ``"bias"``, ...) or ``None`` for the object itself.

    Raises:
        ValueError: if the destination shape differs from ``value.shape``.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    # Write into the matching tensor's .data so autograd history is untouched.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase(name, ignore_keys):
    """Return True if weight ``name`` matches any pattern in ``ignore_keys``.

    Fixes the original signature, which declared the same parameter name
    twice — a SyntaxError in Python.

    Patterns:
        * ``"prefix.*"``  — matches any name starting with ``"prefix."``;
        * ``"a.*.b"``     — matches when both ``"a"`` and ``"b"`` occur in the name;
        * plain strings   — match as substrings.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase(fairseq_dict, hf_model, task):
    """Copy every fairseq weight into ``hf_model`` for the given SpeechT5
    ``task`` ("s2t", "t2s" or "s2s"), logging anything left unmapped.

    Repairs from the original: the signature declared one parameter name
    three times (a SyntaxError), and the per-task mapping/ignore-list/feature
    encoder plus the ``is_used`` flag were all bound to throwaway locals while
    read under their real names.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        # Text encoder prenet has no conv feature encoder.
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(F'''{name} was ignored''' )
            continue

        is_used = False
        if "conv_layers" in name:
            # Conv feature-encoder weights have their own bespoke loader.
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched suffix.
                        layer_index = name.split(key)[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Route one fairseq conv-feature-encoder tensor into the HF feature extractor.

    ``full_name`` ends in ``conv_layers.<layer_id>.<type_id>.<param>``:
    type_id 0 is the conv weight/bias; type_id 2 is the layer norm (only the
    first layer when group norm is used). Anything else is recorded in
    ``unused_weights``.

    Raises:
        ValueError: if the checkpoint tensor shape does not match the model.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''')
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''')
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    vocab_path=None,
    repo_id=None,
):
    """Convert a fairseq SpeechT5 checkpoint into HF format and save it.

    Args:
        task: one of ``"s2t"``, ``"t2s"``, ``"s2s"``.
        checkpoint_path: path to the fairseq checkpoint file.
        pytorch_dump_folder_path: output directory for model and processor.
        config_path: optional path to a pre-existing HF config.
        vocab_path: optional path to the SentencePiece vocabulary.
        repo_id: optional hub repo to push the converted model to.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''')

    # NOTE(review): ``tokenizer`` is only bound when vocab_path is provided;
    # calling without it raises NameError below — confirm intended behavior.
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point for the SpeechT5 checkpoint conversion.
    __A : Dict = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    # NOTE(review): the ArgumentParser above is bound to `__A`, yet `parser` and
    # `args` are read below, and `convert_speechta_checkpoint` is not defined in
    # this file (the converter above is named `lowercase`). This block appears
    # machine-mangled — confirm against the upstream conversion script.
    __A : str = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class __lowerCAmelCase :
    # NOTE(review): this block appears machine-mangled. The suite below
    # instantiates a tester as `TFPegasusModelTester`, which is not defined
    # under that name here; `__init__` binds its arguments to throwaway locals
    # `A__` (so `self.batch_size` etc. are never set); and the duplicated
    # `UpperCamelCase__` parameter names are a SyntaxError. Confirm against the
    # upstream TF Pegasus test module before relying on this class.
    '''simple docstring'''

    # Class-level fixtures: config class, config overrides, activation name.
    __magic_name__ : int = PegasusConfig
    __magic_name__ : Tuple = {}
    __magic_name__ : Optional[int] = """gelu"""

    def __init__( self : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=13 , UpperCamelCase__ : int=7 , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Dict=False , UpperCamelCase__ : Tuple=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[int]=40 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Union[str, Any]=0 , ):
        # Presumably stores the test hyper-parameters (batch size, vocab size,
        # layer counts, special-token ids) read by the helpers below — the
        # assignment targets were mangled, so nothing is actually stored.
        A__ : int =parent
        A__ : Optional[Any] =batch_size
        A__ : Optional[Any] =seq_length
        A__ : Optional[Any] =is_training
        A__ : List[str] =use_labels
        A__ : Any =vocab_size
        A__ : int =hidden_size
        A__ : Any =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Union[str, Any] =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : str =max_position_embeddings
        A__ : Tuple =eos_token_id
        A__ : Any =pad_token_id
        A__ : Dict =bos_token_id

    def _UpperCAmelCase ( self : Any ):
        # Builds (config, inputs_dict): random input ids terminated by EOS plus
        # a Pegasus config derived from the stored hyper-parameters.
        A__ : List[Any] =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
        A__ : Tuple =tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
        A__ : Optional[Any] =tf.concat([input_ids, eos_tensor] , axis=1 )
        A__ : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : Dict =self.config_cls(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
        A__ : Union[str, Any] =prepare_pegasus_inputs_dict(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        return config, inputs_dict

    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ):
        # Regression check: decoding with cached past_key_values must match
        # re-decoding the full sequence from scratch on a random output slice.
        A__ : Optional[Any] =TFPegasusModel(config=UpperCamelCase__ ).get_decoder()
        A__ : List[str] =inputs_dict["input_ids"]

        A__ : Tuple =input_ids[:1, :]
        A__ : Any =inputs_dict["attention_mask"][:1, :]
        A__ : Tuple =inputs_dict["head_mask"]
        A__ : int =1

        # first forward pass
        A__ : Dict =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , head_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )

        A__ , A__ : str =outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        A__ : Optional[Any] =ids_tensor((self.batch_size, 3) , config.vocab_size )
        A__ : Any =tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )

        # append to next input_ids and
        A__ : Union[str, Any] =tf.concat([input_ids, next_tokens] , axis=-1 )
        A__ : Any =tf.concat([attention_mask, next_attn_mask] , axis=-1 )

        A__ : Union[str, Any] =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )[0]
        A__ : int =model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[0]

        self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )

        # select random slice
        A__ : Any =int(ids_tensor((1,) , output_from_past.shape[-1] ) )
        A__ : Optional[int] =output_from_no_past[:, -3:, random_slice_idx]
        A__ : List[str] =output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(UpperCamelCase__ , UpperCamelCase__ , rtol=1E-3 )
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the keyword inputs for a TF Pegasus forward pass.

    Any mask that is not supplied is derived: attention masks attend to every
    non-padding token (the first decoder position is always attended to), and
    head masks default to all-ones.
    """
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                # The first decoder token (decoder start token) is always kept.
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    # NOTE(review): the base-class names were mangled to `_UpperCamelCase`
    # (upstream these are the common TF model-tester and pipeline-tester
    # mixins), and `TFPegasusModelTester` is not defined under that name in
    # this file. Confirm against the upstream test module.
    '''simple docstring'''

    # Model classes exercised by the common tests and by the pipeline tests.
    __magic_name__ : List[Any] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    __magic_name__ : Any = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    __magic_name__ : Tuple = (
        {
            """conversational""": TFPegasusForConditionalGeneration,
            """feature-extraction""": TFPegasusModel,
            """summarization""": TFPegasusForConditionalGeneration,
            """text2text-generation""": TFPegasusForConditionalGeneration,
            """translation""": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    __magic_name__ : Tuple = True
    __magic_name__ : int = False
    __magic_name__ : Optional[int] = False

    def _UpperCAmelCase ( self : List[str] ):
        # Set up the model tester and the shared config tester.
        A__ : List[str] =TFPegasusModelTester(self )
        A__ : str =ConfigTester(self , config_class=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[Any] ):
        # Run the common configuration sanity checks.
        self.config_tester.run_common_tests()

    def _UpperCAmelCase ( self : int ):
        # Exercise the cached-decoding equivalence check from the tester.
        A__ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_sentencepiece
@require_tokenizers
@require_tf
class __lowerCAmelCase ( unittest.TestCase):
    # NOTE(review): integration test against the `google/pegasus-xsum`
    # checkpoint. The attribute/method names read below (`self.src_text`,
    # `self.expected_text`, `self.model_name`, `self.translate_src_text`,
    # `self._assert_generated_batch_equal_expected`) do not match the mangled
    # definitions (`__magic_name__` / `_UpperCAmelCase`) — confirm against the
    # upstream test module.
    '''simple docstring'''

    # Source articles to summarize.
    __magic_name__ : Any = [
        """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
        """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
    ]

    # Reference summaries expected from the TF checkpoint.
    __magic_name__ : List[Any] = [
        """California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
        """ reduce the risk of wildfires.""",
        """N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
    ]  # differs slightly from pytorch, likely due to numerical differences in linear layers
    __magic_name__ : Optional[Any] = """google/pegasus-xsum"""

    @cached_property
    def _UpperCAmelCase ( self : str ):
        # Tokenizer for the reference checkpoint (cached per test instance).
        return AutoTokenizer.from_pretrained(self.model_name )

    @cached_property
    def _UpperCAmelCase ( self : Any ):
        # Model under test, loaded from the hub (cached per test instance).
        A__ : Tuple =TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
        return model

    def _UpperCAmelCase ( self : int , **UpperCamelCase__ : int ):
        # Generate summaries and compare them with the expected references.
        A__ : str =self.translate_src_text(**UpperCamelCase__ )
        assert self.expected_text == generated_words

    def _UpperCAmelCase ( self : Union[str, Any] , **UpperCamelCase__ : List[Any] ):
        # Tokenize the source batch, beam-search generate, and decode.
        A__ : Dict =self.tokenizer(self.src_text , **UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="tf" )
        A__ : str =self.model.generate(
            model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=UpperCamelCase__ , )
        A__ : int =self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=UpperCamelCase__ )
        return generated_words

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # End-to-end batch generation test (slow: downloads from the hub).
        self._assert_generated_batch_equal_expected()
| 656 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    # NOTE(review): a GPT-2-based text decoder that prepends projected
    # "prefix" embeddings to the token embeddings (upstream this mixes
    # ModelMixin, ConfigMixin, ModuleUtilsMixin — the base names were mangled
    # to `_UpperCamelCase`). The `A__` assignment targets throughout mean the
    # attributes read later (`self.prefix_length`, `self.encode_prefix`,
    # `self.transformer`, ...) are never actually set — this block appears
    # machine-mangled; confirm against the upstream implementation.
    '''simple docstring'''

    # Parameter-name patterns excluded from weight loading (GPT-2 attention
    # bias buffers).
    __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        # Builds the prefix encode/decode projections (identity when no hidden
        # dim is requested) and the underlying GPT-2 LM head model.
        super().__init__()

        A__ : Dict =prefix_length

        # NOTE(review): this message interpolates `prefix_hidden_dim`, which is
        # None in this branch — presumably `prefix_inner_dim` was intended.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )

        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim

        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
        # Forward pass: embed the text tokens, project the prefix, run GPT-2
        # over [prefix | text]; when labels are given, dummy tokens pad the
        # prefix positions so the loss aligns with the text tokens.
        A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
        A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
        A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
        A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )

        if labels is not None:
            A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
        # Zero token ids used to pad the prefix positions in the labels.
        return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
        # Project a feature vector into the prefix hidden space.
        return self.encode_prefix(UpperCamelCase__ )

    @torch.no_grad()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
        # Generate one caption per feature via beam search, then stack the
        # per-sample best token sequences and their lengths.
        A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
        A__ : List[str] =[]
        A__ : Dict =[]
        for feature in features:
            A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) )  # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Optional[Any] =self.generate_beam(
                input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
        A__ : Optional[int] =torch.stack(UpperCamelCase__ )
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
        # Beam search over GPT-2: maintains `beam_size` candidate sequences,
        # extending each step by the top-scoring next tokens (length-averaged
        # log-probabilities), stopping a beam when it emits the EOS token.
        A__ : str =eos_token_id
        A__ : Optional[Any] =None
        A__ : int =None
        A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
        A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )

        if input_embeds is not None:
            A__ : Union[str, Any] =input_embeds
        else:
            A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )

        for i in range(UpperCamelCase__ ):
            A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
            A__ : Tuple =outputs.logits
            A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Optional[Any] =logits.softmax(-1 ).log()

            if scores is None:
                # First step: seed all beams from the top-k next tokens.
                A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
                A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
                A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : str =next_tokens
                else:
                    A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
                    A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Later steps: freeze stopped beams and re-rank all extensions
                # by their length-averaged cumulative score.
                A__ : Union[str, Any] =-float(np.inf )
                A__ : Dict =0
                A__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
                A__ : Tuple =next_tokens // scores_sum.shape[1]
                A__ : List[Any] =seq_lengths[next_tokens_source]
                A__ : int =next_tokens % scores_sum.shape[1]
                A__ : str =next_tokens.unsqueeze(1 )
                A__ : List[Any] =tokens[next_tokens_source]
                A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
                A__ : List[str] =generated[next_tokens_source]
                A__ : Optional[Any] =scores_sum_average * seq_lengths
                A__ : Optional[int] =is_stopped[next_tokens_source]

            A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
            A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
            if is_stopped.all():
                break

        # Order beams by final length-averaged score, best first.
        A__ : Optional[int] =scores / seq_lengths
        A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        A__ : int =[tokens[i] for i in order]
        A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
        A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 656 | 1 |
"""simple docstring"""
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
__A : Dict = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
__A : Union[str, Any] = parser.parse_args()
__A : List[str] = "cpu"
__A : Union[str, Any] = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"
__A : List[Any] = "path-to-your-trained-model"
__A : Any = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
__A : Optional[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
__A : Optional[Any] = pipe.to(device)
# to channels last
__A : Optional[int] = pipe.unet.to(memory_format=torch.channels_last)
__A : Union[str, Any] = pipe.vae.to(memory_format=torch.channels_last)
__A : List[str] = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
__A : Optional[int] = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
__A : Optional[Any] = torch.randn(2, 4, 64, 64)
__A : Optional[int] = torch.rand(1) * 999
__A : List[Any] = torch.randn(2, 77, 768)
__A : List[Any] = (sample, timestep, encoder_hidden_status)
try:
__A : List[Any] = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
__A : Dict = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
__A : Optional[Any] = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
__A : List[Any] = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
__A : Union[str, Any] = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
__A : Tuple = 666
__A : Any = torch.Generator(device).manual_seed(seed)
__A : Dict = {"generator": generator}
if args.steps is not None:
__A : str = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
__A : Union[str, Any] = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save("generated.png")
| 656 | """simple docstring"""
import os
def solution(path=None):
    """Project Euler 18/67: maximum top-to-bottom path sum in a triangle.

    Args:
        path: optional path to a whitespace-separated triangle file; defaults
            to ``triangle.txt`` located next to this script.

    Returns:
        The largest sum obtainable by starting at the apex and moving to an
        adjacent number on the row below at each step.
    """
    if path is None:
        script_dir = os.path.dirname(os.path.realpath(__file__))
        path = os.path.join(script_dir, "triangle.txt")

    with open(path) as f:
        triangle = [[int(number) for number in line.strip().split(" ")] for line in f]

    # Dynamic programming: fold each row into the next, adding to every cell
    # the best running sum from the two parents above it (0 at the edges).
    for i in range(1, len(triangle)):
        for j in range(len(triangle[i])):
            above_right = triangle[i - 1][j] if j != len(triangle[i - 1]) else 0
            above_left = triangle[i - 1][j - 1] if j > 0 else 0
            triangle[i][j] += max(above_right, above_left)
    return max(triangle[-1])


if __name__ == "__main__":
    print(solution())
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazily populated mapping: submodule name -> list of public symbols it defines.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Each optional backend contributes its symbols only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Translate original GLPN checkpoint keys to HF ``GLPNForDepthEstimation`` names.

    Returns a new OrderedDict; the input mapping is not modified. The order of
    the replacement rules matters (e.g. ``attn.q`` must be handled before the
    generic ``attn`` rule).
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(F'''patch_embed{idx}''', F'''patch_embeddings.{int(idx)-1}''')
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(F'''layer_norm{idx}''', F'''layer_norm.{int(idx)-1}''')
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(F'''block{idx}''', F'''block.{int(idx)-1}''')
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(F'''linear_c{idx}''', F'''linear_c.{int(idx)-1}''')
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value projection into separate key and value weights.

    The original checkpoint stores K and V as a single ``kv`` matrix per
    attention block, while HF GLPN expects distinct ``key``/``value``
    parameters. Mutates ``state_dict`` in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''')
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''')
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats test image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def lowercase(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the HuggingFace format and optionally push it to the hub.

    Args:
        checkpoint_path: path to the original PyTorch ``.pth`` checkpoint.
        pytorch_dump_folder_path: output folder (used as the hub repo path when pushing).
        push_to_hub: whether to upload model + image processor to the hub.
        model_name: hub model name; when given, the predicted depth is verified
            against known reference values ("nyu" / "kitti").
    """
    # GLPN-Large style hyper-parameters used by both released checkpoints.
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    # NOTE(review): `prepare_img`, `rename_keys` and `read_in_k_v` below are the helpers
    # defined earlier in this file (renamed to `lowercase` by the obfuscation) — confirm bindings.
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output against the published reference slices
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # Command-line entry point for the GLPN checkpoint conversion.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    # NOTE(review): the converter above is defined under the (obfuscated) name `lowercase`,
    # so that is the name that must be called here.
    lowercase(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 1 |
"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
__A : str = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
__A : int = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
__A : Any = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... 
[\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
# NOTE(review): _DESCRIPTION / _CITATION / _KWARGS_DESCRIPTION were renamed to `__A`
# by the obfuscation in the constants above — restore those names there as well.
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class __lowerCAmelCase ( datasets.Metric):
    """TER (Translation Edit Rate) metric backed by sacrebleu's TER implementation."""

    def _info(self):
        # `datasets.Metric` dispatches on `_info`; the obfuscated name broke that contract.
        # sacrebleu only ships the TER class from 1.4.12 onwards.
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        """Compute TER. Every prediction must come with the same number of references."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # sacrebleu expects the references transposed: one stream per reference index.
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase ( _UpperCamelCase):
    """Configuration class for GPT-Neo models.

    Stores the model hyper-parameters and expands `attention_types` (e.g.
    ``[[["global", "local"], 12]]``) into a flat per-layer attention layout.
    """

    # Canonical PretrainedConfig hooks — the obfuscated dump collapsed all three
    # into one `__magic_name__`, so only the last assignment survived.
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)
        # The expanded layout must provide exactly one attention kind per layer.
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand ``[[kinds, repeat], ...]`` into a flat list of per-layer attention kinds."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def lowercase(input_tensor, dimension, size, step):
    """Custom `torch.Tensor.unfold` implementation (ONNX-export friendly).

    Returns windows of length `size` taken every `step` elements along
    `dimension`, with the window axis moved to the last position —
    identical to ``input_tensor.unfold(dimension, size, step)``.
    """
    import torch

    shape = input_tensor.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    # as_tensor lets this work both eagerly (ints) and under tracing (tensors);
    # torch.div does not accept two plain Python ints.
    min_length = torch.div(torch.as_tensor(sizedim - size), step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input_tensor[tuple(s)]

    # Move the freshly created window axis (at position dimension+1) to the end.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def lowercase(sequence_length, window_size):
    """Return the largest divisor of `sequence_length` smaller than `window_size`,
    together with the resulting number of blocks (sequence_length // divisor).

    Fixed from the obfuscated version whose duplicate parameter names and
    dead local assignments made it a SyntaxError / NameError.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(sequence_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(sequence_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase ( _UpperCamelCase):
    """ONNX export configuration for GPT-Neo (an `OnnxConfigWithPast`).

    The obfuscated dump collapsed all member names into `_UpperCAmelCase`; the
    canonical names are restored because the ONNX export machinery dispatches
    on `inputs`, `num_attention_heads`, `generate_dummy_inputs` and
    `default_onnx_opset`.
    """

    @property
    def inputs(self):
        """Dynamic-axis names for the exported model inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            # With a cache, the attention mask spans past + current tokens.
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self):
        # GPT-Neo stores the head count under `num_heads` (see attribute_map).
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: "Optional[TensorType]" = None,
    ):
        """Build dummy inputs (input_ids, optional past_key_values, attention_mask) for export."""
        common_inputs = super().generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the dummy past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 656 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCAmelCase ( _UpperCamelCase):
    """Tool that answers a natural-language question about an image using a ViLT VQA model.

    The obfuscated dump collapsed all class attributes into one `__magic_name__`
    and all methods into `_UpperCAmelCase`; the canonical PipelineTool names
    (encode/forward/decode and the metadata attributes) are restored.
    """

    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering
    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        # The processor needs torchvision/PIL support.
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        """Featurize the (image, question) pair for the model."""
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        """Map the highest-scoring logit index to its answer string."""
        idx = outputs.argmax(-1).item()
        # `idalabel` in the dump is a mangling of the config's `id2label` mapping.
        return self.model.config.id2label[idx]
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _UpperCamelCase):
    """Configuration class for Megatron-BERT models.

    Restored from the obfuscated dump, where every `self.<attr> = <arg>`
    assignment had been collapsed into a dead local `A__`.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# The UnCLIP pipelines require both PyTorch and transformers >= 4.25.0.
try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dependencies missing: fall back to dummy objects that raise a helpful error on use.
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    # Dependencies available: expose the real pipeline implementations.
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
| 656 | """simple docstring"""
from __future__ import annotations
def lowercase(nums: list[float]):
    """Return True if side lengths `nums` can form a polygon in the Euclidean plane.

    A polygon exists iff the longest side is strictly shorter than the sum of
    the remaining sides. Raises ValueError for fewer than 3 usable sides
    (monogons/digons) or for non-positive lengths.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    # Sort a copy so the input list is left untouched.
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 656 | 1 |
"""simple docstring"""
def lowercase(length: int):
    """Return the first `length` hexagonal numbers, h(n) = n * (2n - 1) for n = 0..length-1.

    Raises:
        ValueError: if `length` is not a positive integer.
    """
    # Fixed: the obfuscated version compared an undefined `length` and called
    # isinstance with the value as its own type argument.
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
if __name__ == "__main__":
    # Demo: print the first 5 and the first 10 hexagonal numbers.
    # NOTE(review): the generator above is defined under the (obfuscated) name
    # `lowercase`; it takes a single positional `length` argument.
    print(lowercase(5))
    print(lowercase(10))
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the MEGA model package; restored from the obfuscated
# dump where `_import_structure` and the `sys.modules` registration were lost.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: also expose the modeling classes lazily.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Real imports for static type checkers only.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __lowerCAmelCase ( unittest.TestCase):
    """Fast (CPU-oriented) tests for AltDiffusionImgaImgPipeline.

    NOTE(review): local variables in this class were obfuscated to `A__`, so many
    later reads (batch_size, num_channels, sizes, image, model, unet, vae, bert,
    tokenizer, init_image, alt_pipe, prompt, generator, output, image_slice,
    expected_slice, pipe, ...) reference names that are never assigned, and
    `UpperCamelCase__` (presumably `torch_device` / boolean flags) is undefined.
    The original assignments need restoring from upstream diffusers — TODO confirm.
    """
    # Free GPU memory between tests.
    def _UpperCAmelCase ( self : Tuple ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Dummy 1x3x32x32 input image fixture (names broken by obfuscation, see class docstring).
    @property
    def _UpperCAmelCase ( self : str ):
        A__ : Optional[Any] =1
        A__ : List[str] =3
        A__ : Dict =(32, 32)
        A__ : Any =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCamelCase__ )
        return image
    # Tiny conditional UNet fixture.
    @property
    def _UpperCAmelCase ( self : List[str] ):
        torch.manual_seed(0 )
        A__ : int =UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        return model
    # Tiny VAE fixture.
    @property
    def _UpperCAmelCase ( self : str ):
        torch.manual_seed(0 )
        A__ : List[Any] =AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
        return model
    # Tiny Roberta-series text encoder fixture.
    @property
    def _UpperCAmelCase ( self : Union[str, Any] ):
        torch.manual_seed(0 )
        A__ : Dict =RobertaSeriesConfig(
            hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
        return RobertaSeriesModelWithTransformation(UpperCamelCase__ )
    # Stub feature extractor that returns an empty pixel_values tensor.
    @property
    def _UpperCAmelCase ( self : Optional[int] ):
        def extract(*UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Union[str, Any] ):
            class __lowerCAmelCase :
                '''simple docstring'''
                def __init__( self : str ):
                    A__ : Optional[int] =torch.ones([0] )
                def _UpperCAmelCase ( self : int , UpperCamelCase__ : Tuple ):
                    self.pixel_values.to(UpperCamelCase__ )
                    return self
            return Out()
        return extract
    # End-to-end img2img on CPU: compares a 3x3 output slice against reference values.
    def _UpperCAmelCase ( self : int ):
        A__ : List[Any] ="cpu"  # ensure determinism for the device-dependent torch.Generator
        A__ : List[str] =self.dummy_cond_unet
        A__ : Dict =PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
        A__ : Optional[int] =self.dummy_vae
        A__ : Optional[int] =self.dummy_text_encoder
        A__ : Union[str, Any] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        A__ : Any =77
        A__ : List[Any] =self.dummy_image.to(UpperCamelCase__ )
        A__ : Optional[int] =init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        A__ : Any =AltDiffusionImgaImgPipeline(
            unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
        A__ : Tuple =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
        A__ : List[Any] =alt_pipe.to(UpperCamelCase__ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Optional[Any] ="A painting of a squirrel eating a burger"
        A__ : str =torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
        A__ : List[Any] =alt_pipe(
            [prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCamelCase__ , )
        A__ : int =output.images
        A__ : List[Any] =torch.Generator(device=UpperCamelCase__ ).manual_seed(0 )
        A__ : str =alt_pipe(
            [prompt] , generator=UpperCamelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0]
        A__ : Dict =image[0, -3:, -3:, -1]
        A__ : int =image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        A__ : int =np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
    # Same pipeline in fp16; only checks the output shape. GPU-only.
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _UpperCAmelCase ( self : List[Any] ):
        A__ : int =self.dummy_cond_unet
        A__ : Optional[Any] =PNDMScheduler(skip_prk_steps=UpperCamelCase__ )
        A__ : Optional[Any] =self.dummy_vae
        A__ : Tuple =self.dummy_text_encoder
        A__ : List[str] =XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
        A__ : Optional[int] =77
        A__ : int =self.dummy_image.to(UpperCamelCase__ )
        # put models in fp16
        A__ : Optional[Any] =unet.half()
        A__ : int =vae.half()
        A__ : Optional[Any] =bert.half()
        # make sure here that pndm scheduler skips prk
        A__ : Optional[Any] =AltDiffusionImgaImgPipeline(
            unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=self.dummy_extractor , )
        A__ : Dict =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase__ )
        A__ : Optional[int] =alt_pipe.to(UpperCamelCase__ )
        alt_pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Union[str, Any] ="A painting of a squirrel eating a burger"
        A__ : Dict =torch.manual_seed(0 )
        A__ : int =alt_pipe(
            [prompt] , generator=UpperCamelCase__ , num_inference_steps=2 , output_type="np" , image=UpperCamelCase__ , ).images
        assert image.shape == (1, 32, 32, 3)
    # Full checkpoint img2img at a resolution divisible by 8 but not 16/32. GPU-only.
    @unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
    def _UpperCAmelCase ( self : List[Any] ):
        A__ : int =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        # resize to resolution that is divisible by 8 but not 16 or 32
        A__ : Dict =init_image.resize((760, 504) )
        A__ : List[Any] ="BAAI/AltDiffusion"
        A__ : List[str] =AltDiffusionImgaImgPipeline.from_pretrained(
            UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        A__ : str ="A fantasy landscape, trending on artstation"
        A__ : Tuple =torch.manual_seed(0 )
        A__ : List[Any] =pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type="np" , )
        A__ : Union[str, Any] =output.images[0]
        A__ : List[str] =image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        A__ : Dict =np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
    """Slow GPU integration test for AltDiffusionImgaImgPipeline against a stored
    reference image.

    NOTE(review): locals were obfuscated to `A__`, so later reads (init_image,
    expected_image, pipe, prompt, generator, output, image) reference names that
    are never assigned, and `UpperCamelCase__` (model id / flags / torch_device)
    is undefined. Restore the original assignments from upstream diffusers.
    """
    # Free GPU memory between tests.
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    # Runs the full BAAI/AltDiffusion checkpoint and compares against a stored .npy output.
    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : Tuple =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        A__ : Union[str, Any] =init_image.resize((768, 512) )
        A__ : List[Any] =load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
        A__ : Tuple ="BAAI/AltDiffusion"
        A__ : Optional[int] =AltDiffusionImgaImgPipeline.from_pretrained(
            UpperCamelCase__ , safety_checker=UpperCamelCase__ , )
        pipe.to(UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        pipe.enable_attention_slicing()
        A__ : Optional[int] ="A fantasy landscape, trending on artstation"
        A__ : Dict =torch.manual_seed(0 )
        A__ : List[Any] =pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase__ , output_type="np" , )
        A__ : Optional[int] =output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image ).max() < 1E-2
| 656 | """simple docstring"""
def lowercase(num: int):
    """Sieve of Eratosthenes: return the list of primes <= `num`.

    Raises:
        ValueError: if `num` is not positive.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p starting at p*p as composite.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Read the upper bound from the user and print all primes up to it.
    # NOTE(review): the sieve above is defined under the (obfuscated) name `lowercase`.
    user_num = int(input("Enter a positive integer: ").strip())
    print(lowercase(user_num))
| 656 | 1 |
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowercase ( ):
    """Parse the command-line arguments for the image-generation script.

    Returns:
        argparse.Namespace with ``pretrained_model_name_or_path``, ``caption``,
        ``images_num``, ``seed`` and ``cuda_id``.
    """
    # The original passed an undefined ``UpperCamelCase`` as type/default/required;
    # restored to the obvious str/int types with a required model path.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m" , "--pretrained_model_name_or_path" , type=str , default=None , required=True , help="Path to pretrained model or model identifier from huggingface.co/models." , )
    parser.add_argument(
        "-c" , "--caption" , type=str , default="robotic cat with wings" , help="Text used to generate images." , )
    parser.add_argument(
        "-n" , "--images_num" , type=int , default=4 , help="How much images to generate." , )
    parser.add_argument(
        "-s" , "--seed" , type=int , default=42 , help="Seed for random process." , )
    parser.add_argument(
        "-ci" , "--cuda_id" , type=int , default=0 , help="cuda_id." , )
    args = parser.parse_args()
    return args
def lowercase ( imgs , rows , cols ):
    """Paste ``rows * cols`` equally sized PIL images into one grid image.

    Args:
        imgs: list of PIL images, all the same size.
        rows: number of grid rows.
        cols: number of grid columns.

    Returns:
        A new RGB PIL image containing the grid.

    Raises:
        ValueError: if ``len(imgs) != rows * cols``.
    """
    if not len(imgs ) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct." )
    # Original unpacked the size into two mangled names and then used w/h —
    # a NameError; restored the intended unpack (the unused grid.size unpack
    # was dropped).
    w, h = imgs[0].size
    grid = Image.new("RGB" , size=(cols * w, rows * h) )
    for i, img in enumerate(imgs ):
        # fill the grid row-major: column = i % cols, row = i // cols
        grid.paste(img , box=(i % cols * w, i // cols * h) )
    return grid
def lowercase ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=50 , num_images_per_prompt=1 , seed=42 , ):
    """Run the diffusion pipeline and arrange its outputs in a square-ish grid.

    The original signature reused the same mangled parameter name five times
    (a SyntaxError); parameter names restored from the call sites/help text.

    Returns:
        Tuple ``(grid, images)`` — the pasted grid image and the raw images.
    """
    generator = torch.Generator(pipeline.device ).manual_seed(seed )
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt ) )
    # NOTE(review): ``image_grid`` is what the original calls, but the helper in
    # this file is (mangled-)named ``lowercase`` — confirm the intended binding.
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows )
    return grid, images
# NOTE(review): this whole section is machine-mangled — every result is bound
# to ``__A`` but later lines read the intended names (args, tokenizer,
# text_encoder, vae, unet, pipeline, grid, images, dirname), and it calls
# ``parse_args``/``generate_images`` while the helpers above are named
# ``lowercase``. As written it raises NameError at import; the comments below
# document the apparent intent only.
__A : Dict = parse_args()
# Load models and create wrapper for stable diffusion
__A : List[str] = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
__A : List[str] = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
__A : List[str] = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
__A : Dict = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
__A : Tuple = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# disable the safety checker: always report (images, not-nsfw)
__A : List[str] = lambda images, clip_input: (images, False)
# prefer an Intel Neural Compressor quantized UNet when one was saved
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    __A : Dict = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    __A : int = unet.to(torch.device("cuda", args.cuda_id))
__A : Dict = pipeline.to(unet.device)
__A , __A : Optional[int] = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# save the grid plus each individual image under a caption-derived directory
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
__A : Optional[int] = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 656 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    """Regression test: an optimizer wrapped by ``Accelerator.prepare`` must
    survive a pickle round-trip."""

    def test_accelerated_optimizer_pickling( self ):
        # The original method name started with an underscore (never discovered
        # by unittest) and pickled an undefined parameter name; both restored.
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # reset accelerate's global process state so later tests start clean
        AcceleratorState._reset_state()
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import scaffolding for the Mega model (standard transformers pattern):
# list the submodule contents, and only import them on attribute access.
# NOTE(review): mangling bound both the import structure and the model list to
# ``__A`` while ``_LazyModule`` below reads ``_import_structure`` — confirm the
# intended names before relying on this module.
__A : Optional[Any] = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
    # modeling classes are only registered when torch is installed
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __A : Any = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]
if TYPE_CHECKING:
    # static type checkers see the real imports
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    # at runtime, replace this module with a lazy proxy
    import sys

    __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow (SentencePiece) tokenizer is optional; fall back to None when the
# sentencepiece package is missing.
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    __A : Optional[int] = None
__A : Union[str, Any] = logging.get_logger(__name__)
# File names the tokenizer expects inside a checkpoint directory.
__A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
# Hub URLs of the pretrained vocab/tokenizer files per checkpoint.
# NOTE(review): all of these are bound to the mangled name ``__A``; the class
# below reads VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES — confirm the intended bindings.
__A : str = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}
# Maximum sequence lengths of the pretrained checkpoints.
__A : List[str] = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}
# SentencePiece word-boundary marker.
__A : Tuple = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast BigBird tokenizer (backed by HuggingFace *tokenizers*).

    NOTE(review): the original was machine-mangled — every method signature
    repeated the same parameter name (a SyntaxError) and every class attribute
    shared one name (so only the last survived). Attribute and method names are
    restored to the standard ``PreTrainedTokenizerFast`` contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        # Wrap plain-string special tokens as AddedToken so stripping behaviour
        # is explicit and consistent with the slow tokenizer.
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # only a real on-disk spiece.model lets us rebuild the slow tokenizer
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs: ``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for ``[CLS] A [SEP]``, 1 for ``B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the SentencePiece model into ``save_directory``."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )

        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger("transformers.models.speecht5")
# Fairseq -> transformers parameter-name maps. A "*" in a key stands for a
# layer index and is substituted during conversion.
# NOTE(review): all constants are bound to the mangled name ``__A``; the
# conversion code reads MAPPING_* / IGNORE_KEYS_* — confirm intended bindings.
__A : Optional[Any] = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
__A : Optional[int] = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
__A : List[str] = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
__A : List[Any] = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
__A : Union[str, Any] = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
__A : Any = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
__A : Union[str, Any] = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
__A : Optional[int] = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task combined maps: speech-to-text, text-to-speech, speech-to-speech.
__A : Union[str, Any] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
__A : Optional[Any] = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : Optional[int] = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
__A : int = []
# Checkpoint entries that are never converted, plus per-task extensions.
__A : int = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
__A : Optional[Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
__A : Tuple = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
__A : Union[str, Any] = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def lowercase ( hf_pointer , key , value , full_name , weight_type ):
    """Copy ``value`` into the attribute of ``hf_pointer`` addressed by dotted ``key``.

    Args:
        hf_pointer: root transformers module to descend into.
        key: dotted attribute path (e.g. ``"encoder.layer_norm"``).
        value: tensor to copy in.
        full_name: original fairseq name, used only for error/log messages.
        weight_type: one of weight/weight_g/weight_v/bias/running_mean/
            running_var/num_batches_tracked, or None for a direct assignment.

    Raises:
        ValueError: when the destination shape does not match ``value``.
    """
    # The original signature repeated one mangled parameter name five times
    # (SyntaxError) and referenced undefined hf_pointer/hf_shape; restored.
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def lowercase ( name , ignore_keys ):
    """Return True if fairseq key ``name`` matches any pattern in ``ignore_keys``.

    Patterns: ``"x.*"`` matches any name starting with ``"x."``; ``"a.*.b"``
    matches names containing both ``a`` and ``b``; otherwise plain substring.
    """
    # Restored the prefix/suffix unpack that the mangled original lost.
    for key in ignore_keys:
        if key.endswith(".*" ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*." )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase ( fairseq_dict , hf_model , task ):
    """Copy every convertible weight from a fairseq state dict into ``hf_model``.

    Args:
        fairseq_dict: fairseq checkpoint state dict (name -> tensor).
        hf_model: target transformers SpeechT5 model.
        task: "s2t", "t2s" or "s2s" — selects the name map and ignore list.

    Raises:
        ValueError: for an unsupported ``task``.

    NOTE(review): this keeps the original's calls to ``should_ignore`` /
    ``set_recursively`` / ``load_conv_layer``, although the helpers in this
    mangled file are all named ``lowercase`` — confirm the intended bindings.
    """
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )

    for name, value in fairseq_dict.items():
        if should_ignore(name , IGNORE_KEYS ):
            logger.info(F'''{name} was ignored''' )
            continue

        is_used = False
        if "conv_layers" in name:
            # conv feature-extractor weights take a dedicated loader
            load_conv_layer(
                name , value , feature_encoder , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if "*" in key:
                    prefix, suffix = key.split(".*." )
                    if prefix in name and suffix in name:
                        key = suffix
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # recover the layer index from the fairseq name
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    """Load one fairseq conv feature-extractor weight into ``feature_extractor``.

    ``full_name`` looks like ``...conv_layers.<layer>.<type>...`` where type 0
    is the conv itself and type 2 is its (layer/group) norm; anything else is
    recorded in ``unused_weights``.
    """
    # Restored parameter and local names; the mangled original repeated one
    # parameter name five times (SyntaxError).
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def lowercase ( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ):
    """Convert a fairseq SpeechT5 checkpoint to the transformers format.

    Args:
        task: "s2t", "t2s" or "s2s" — which SpeechT5 head to instantiate.
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: where to save the converted model/processor.
        config_path: optional hf config.json to start from.
        vocab_path: optional SentencePiece model for the tokenizer.
        repo_id: optional hub repo to push the result to.
    """
    # Restored parameter/local names; the mangled original repeated one
    # parameter name six times (SyntaxError).
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path )
    else:
        config = SpeechTaConfig()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config )
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config )
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )

    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path , model_max_length=config.max_text_positions )

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>" , lstrip=True , rstrip=False )
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )

    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
    processor.save_pretrained(pytorch_dump_folder_path )

    fairseq_checkpoint = torch.load(checkpoint_path )
    # NOTE(review): keeps the original's call to ``recursively_load_weights``;
    # in this mangled file the loader is also named ``lowercase`` — confirm.
    recursively_load_weights(fairseq_checkpoint["model"] , model , task )

    model.save_pretrained(pytorch_dump_folder_path )

    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id )
        model.push_to_hub(repo_id )
if __name__ == "__main__":
    # Restored ``parser``/``args`` bindings (the original assigned to mangled
    # names and then read the real ones — NameError at runtime).
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    # The converter defined above is (mangled-)named ``lowercase``; the original
    # called a non-existent ``convert_speechta_checkpoint`` (NameError).
    lowercase(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
__A : Optional[int] = logging.get_logger(__name__)
# File name the tokenizer expects inside a checkpoint directory.
# NOTE(review): downstream code reads ``logger`` / ``VOCAB_FILES_NAMES`` /
# ``PRETRAINED_VOCAB_FILES_MAP``, but the mangling bound everything to ``__A``.
__A : Optional[int] = {"vocab_file": "spiece.model"}
# Hub URL of the pretrained SentencePiece model.
__A : List[Any] = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase ( _UpperCamelCase):
    """CPM tokenizer: an XLNet-style SentencePiece tokenizer that pre-segments
    Chinese text with jieba and maps spaces/newlines to private-use glyphs.

    NOTE(review): the original was machine-mangled — several method signatures
    repeated the same parameter name (SyntaxErrors) and all methods shared one
    name. Names restored to the standard ``PreTrainedTokenizer`` contract.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3  # XLNet-style segment id used for padding
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation." )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        """Size of the underlying SentencePiece vocabulary."""
        return len(self.sp_model)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload it
        # from the vocab file in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before SP."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, splitting digit+comma pieces like XLNet."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                # re-split "123," so the trailing comma is its own piece
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        """Map a token string to its SentencePiece id."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Map a SentencePiece id back to its token string."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Join pieces, restoring the SP word-boundary marker as a space."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet-style inputs: ``A <sep> <cls>`` or ``A <sep> B <sep> <cls>``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Segment ids: 0 for ``A <sep>``, 1 for ``B <sep>``, 2 for ``<cls>``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Write the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        """Decode, then map the private-use glyphs back to space/newline."""
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 656 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Fast CPU checks for OnnxStableDiffusionImgaImgPipeline.

    Every test loads the tiny hub checkpoint named below, optionally swaps
    in a different scheduler, runs 3 inference steps on a deterministic
    1x3x128x128 input, and compares a 3x3 corner slice of the output
    against precomputed reference pixels (loose 1e-1 tolerance).

    NOTE(review): local names in this class look machine-mangled — values
    are bound to `A__` but read back under their original names (`image`,
    `generator`, `pipe`, `inputs`, ...), and `self.hub_checkpoint` does not
    match the `__magic_name__` attribute. Left byte-identical; flagged for
    repair.
    """

    # Tiny checkpoint kept on the Hub specifically for fast CI runs.
    __magic_name__ : Dict = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"""
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict=0 ):
        # Deterministic dummy inputs keyed off the integer seed argument.
        A__ : str =floats_tensor((1, 3, 128, 128) , rng=random.Random(UpperCamelCase__ ) )
        A__ : Tuple =np.random.RandomState(UpperCamelCase__ )
        A__ : Any ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def _UpperCAmelCase ( self : Dict ):
        # Default scheduler shipped with the checkpoint.
        A__ : Any =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Union[str, Any] =self.get_dummy_inputs()
        A__ : Dict =pipe(**UpperCamelCase__ ).images
        A__ : Dict =image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        A__ : Any =np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] )
        assert np.abs(image_slice - expected_slice ).max() < 1E-1
    def _UpperCAmelCase ( self : Dict ):
        # PNDM scheduler (with PRK steps skipped).
        A__ : str =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A__ : Optional[Any] =PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=UpperCamelCase__ )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Dict =self.get_dummy_inputs()
        A__ : Any =pipe(**UpperCamelCase__ ).images
        A__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        A__ : List[Any] =np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _UpperCAmelCase ( self : Optional[int] ):
        # LMS discrete scheduler.
        A__ : Tuple =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A__ : Tuple =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        # warmup pass to apply optimizations
        A__ : Dict =pipe(**self.get_dummy_inputs() )
        A__ : Optional[Any] =self.get_dummy_inputs()
        A__ : List[str] =pipe(**UpperCamelCase__ ).images
        A__ : List[str] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        A__ : List[str] =np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _UpperCAmelCase ( self : str ):
        # Euler discrete scheduler.
        A__ : int =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A__ : Optional[int] =EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : List[str] =self.get_dummy_inputs()
        A__ : int =pipe(**UpperCamelCase__ ).images
        A__ : Optional[int] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        A__ : Union[str, Any] =np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _UpperCAmelCase ( self : int ):
        # Euler ancestral scheduler.
        A__ : List[Any] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A__ : Dict =EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Tuple =self.get_dummy_inputs()
        A__ : Optional[Any] =pipe(**UpperCamelCase__ ).images
        A__ : Optional[int] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        A__ : str =np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
    def _UpperCAmelCase ( self : Any ):
        # DPM-Solver multistep scheduler.
        A__ : List[Any] =OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
        A__ : Union[str, Any] =DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : Optional[int] =self.get_dummy_inputs()
        A__ : Dict =pipe(**UpperCamelCase__ ).images
        A__ : List[Any] =image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        A__ : int =np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase):
    """Nightly GPU integration tests for ONNX Stable Diffusion img2img.

    Runs the full-size v1-4 / v1-5 checkpoints on CUDA and compares a small
    slice of the 512x768 output against recorded reference pixels.

    NOTE(review): as in the class above, locals are bound to `A__` but read
    back under their original names (`init_image`, `pipe`, `prompt`, ...).
    Left byte-identical; flagged for repair.
    """

    @property
    def _UpperCAmelCase ( self : Tuple ):
        # ORT CUDA provider configuration (capped memory arena).
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def _UpperCAmelCase ( self : Any ):
        A__ : int =ort.SessionOptions()
        A__ : List[str] =False
        return options
    def _UpperCAmelCase ( self : Any ):
        A__ : int =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        A__ : str =init_image.resize((768, 512) )
        # using the PNDM scheduler by default
        A__ : Tuple =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : List[str] ="A fantasy landscape, trending on artstation"
        A__ : Tuple =np.random.RandomState(0 )
        A__ : Union[str, Any] =pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=UpperCamelCase__ , output_type="np" , )
        A__ : Dict =output.images
        A__ : int =images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        A__ : List[Any] =np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def _UpperCAmelCase ( self : List[str] ):
        # Same scenario but with an explicit LMS scheduler and 20 steps.
        A__ : Optional[int] =load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg" )
        A__ : List[str] =init_image.resize((768, 512) )
        A__ : List[str] =LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
        A__ : Tuple =OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=UpperCamelCase__ )
        A__ : str ="A fantasy landscape, trending on artstation"
        A__ : Tuple =np.random.RandomState(0 )
        A__ : str =pipe(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=UpperCamelCase__ , output_type="np" , )
        A__ : Optional[Any] =output.images
        A__ : int =images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        A__ : Union[str, Any] =np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
| 656 | """simple docstring"""
def lowercase ( n : int , array : list[int] , target : int ):
    """Combination sum IV by plain recursion.

    Counts the ordered sequences of elements of `array` (repetition allowed)
    that sum to `target`. `n` (the length of `array`) is unused here but kept
    for a signature consistent with the other implementations in this file.

    >>> lowercase(3, [1, 2, 5], 5)
    9
    """
    # Bug fix: the original declared all three parameters with the same
    # mangled name (a SyntaxError) while the body read `array` and `target`.
    def count_of_possible_combinations(target : int ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )
    return count_of_possible_combinations(target )
def lowercase ( n : int , array : list[int] , target : int ):
    """Combination sum IV with top-down memoisation.

    `dp_array[t]` caches the number of ordered combinations summing to `t`
    (-1 = not computed yet). `n` is unused, kept for signature consistency.

    >>> lowercase(3, [1, 2, 5], 5)
    9
    """
    # Bug fix: the original declared duplicate parameter names (SyntaxError);
    # restore the names the body reads.
    def count_of_possible_combinations_with_dp_array(
        target : int , dp_array : list[int] ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer
    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )
def lowercase ( n : int , array : list[int] , target : int ):
    """Combination sum IV, bottom-up dynamic programming.

    `n` must be the length of `array`; `dp_array[i]` accumulates the number
    of ordered combinations summing to `i`.

    >>> lowercase(3, [1, 2, 5], 5)
    9
    """
    # Bug fix: the original declared all three parameters with the same
    # mangled name (a SyntaxError); the body reads `array`, `target` and
    # ranges over `n`.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty combination
    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Bug fix: the original bound all three constants to the same mangled
    # name (`__A`) and then called an undefined `combination_sum_iv` with
    # undefined arguments; restore distinct variables and call the function
    # defined in this module.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(lowercase(n, array, target))
| 656 | 1 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__A : List[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase):
    """Configuration for the UPerNet semantic-segmentation model.

    Holds the backbone configuration plus the pooling/auxiliary-head
    hyper-parameters read by the model.
    """
    __magic_name__ : List[Any] = """upernet"""

    def __init__( self : List[str] , backbone_config : List[Any]=None , hidden_size : List[Any]=512 , initializer_range : Tuple=0.02 , pool_scales : Union[str, Any]=[1, 2, 3, 6] , use_auxiliary_head : int=True , auxiliary_loss_weight : str=0.4 , auxiliary_in_channels : List[str]=384 , auxiliary_channels : str=256 , auxiliary_num_convs : Union[str, Any]=1 , auxiliary_concat_input : str=False , loss_ignore_index : Dict=255 , **kwargs : int , ):
        """Create the config; `backbone_config` may be None (default ResNet),
        a dict, or an already-built backbone config object."""
        # Bug fix: the original declared every parameter with the same
        # mangled name (a SyntaxError) while the body read the attribute
        # names restored below.
        super().__init__(**kwargs )
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config , dict ):
            # Rebuild the proper config class from a plain dict.
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def _UpperCAmelCase ( self : Optional[int] ):
        """Serialize to a plain dict, nesting the backbone config."""
        # Bug fix: the original dropped the dict entries into throwaway
        # locals instead of writing them into the output mapping.
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase ( UpperCamelCase : Optional[Any] ):
    """Gaussian Error Linear Unit via the exact erf formulation."""
    # Bug fix: the original bound the converted tensor to a mangled local but
    # then read undefined names `x`/`cdf`; use consistent locals.
    x = tf.convert_to_tensor(UpperCamelCase )
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) ))
    return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
    """Tanh-approximated GELU (the GPT-2 "new" formulation)."""
    # Bug fix: restore the consistent locals the body reads (`x`, `pi`,
    # `coeff`, `cdf`) — the original bound them all to mangled names.
    x = tf.convert_to_tensor(UpperCamelCase )
    pi = tf.cast(math.pi , x.dtype )
    coeff = tf.cast(0.04_47_15 , x.dtype )
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(x , 3 )) ))
    return x * cdf
def lowercase ( UpperCamelCase : Optional[int] ):
    """Mish activation: x * tanh(softplus(x))."""
    # Bug fix: the original converted the input into a mangled local but the
    # return expression read an undefined `x`.
    x = tf.convert_to_tensor(UpperCamelCase )
    return x * tf.tanh(tf.math.softplus(x ) )
def lowercase ( UpperCamelCase : List[str] ):
    """Fast tanh GELU approximation with precomputed sqrt(2/pi)."""
    # Bug fix: the original bound both constants to the same mangled name;
    # keep the cubic coefficient (0.044715) and sqrt(2/pi) distinct.
    x = tf.convert_to_tensor(UpperCamelCase )
    coeff_a = tf.cast(0.04_47_15 , x.dtype )
    coeff_b = tf.cast(0.79_78_84_56_08 , x.dtype )
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_b * (1.0 + coeff_a * x * x) ))
def lowercase ( UpperCamelCase : List[Any] ):
    """Quick GELU: x * sigmoid(1.702 * x)."""
    # Bug fix: restore the `x`/`coeff` locals the body reads.
    x = tf.convert_to_tensor(UpperCamelCase )
    coeff = tf.cast(1.7_02 , x.dtype )
    return x * tf.math.sigmoid(coeff * x )
def lowercase ( UpperCamelCase : Tuple ):
    """GELU with outputs clipped to [-10, 10] (quantization-friendly)."""
    # NOTE(review): `_gelu` is not defined in this file as written — the
    # mangled rename collapsed every activation into `lowercase`; confirm
    # the intended target of this call before relying on it.
    return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 )
def lowercase ( UpperCamelCase : str , axis : Any=-1 ):
    """Gated Linear Unit: split the input in two along `axis`, return a * sigmoid(b)."""
    # Bug fix: the original reused one parameter name for both the input and
    # the split axis (SyntaxError), and gated with the whole input instead of
    # the second half.
    a , b = tf.split(UpperCamelCase , 2 , axis=axis )
    return a * tf.math.sigmoid(b )
# Registry of activation functions, with a TF-version-dependent choice for
# the approximate GELU implementation.
# NOTE(review): the names referenced below (`approximate_gelu_wrap`, `_gelu`,
# `_gelu_new`, `gelu`, `gelu_aa`, `gelu_fast`, `gelu_new`, `glu`, `mish`,
# `quick_gelu`) are not defined in this file as written — the mangled rename
# collapsed the activation functions into `lowercase` and the module
# constants into `__A`. Left byte-identical; flagged for repair.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):
    def lowercase ( UpperCamelCase : int ):
        """Wrapper selecting Keras' built-in approximate GELU on TF >= 2.4."""
        # NOTE(review): `approximate=` is passed the input tensor here rather
        # than a boolean flag — almost certainly a mangled rename; confirm.
        return tf.keras.activations.gelu(UpperCamelCase , approximate=UpperCamelCase )
    __A : Optional[Any] = tf.keras.activations.gelu
    __A : Optional[Any] = approximate_gelu_wrap
else:
    __A : Any = _gelu
    __A : Union[str, Any] = _gelu_new
__A : List[str] = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase ( UpperCamelCase : List[Any] ):
    """Return the activation function registered under the given name.

    Raises:
        KeyError: if the name is not in the ACT2FN mapping.
    """
    if UpperCamelCase in ACTaFN:
        return ACTaFN[UpperCamelCase]
    # Bug fix: the error message interpolated an undefined name
    # (`activation_string`); interpolate the actual argument instead.
    raise KeyError(F'''function {UpperCamelCase} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
| 656 | 1 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]=None ):
A__ : Dict =start
A__ : str =end
A__ : Optional[int] =val
A__ : Union[str, Any] =(start + end) // 2
A__ : Any =left
A__ : Optional[Any] =right
def __repr__( self : Union[str, Any] ):
return F'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class SegmentTree:
    """Segment tree with point update and inclusive range query for any
    associative combine function (e.g. operator.add, min, max).

    Bug fixes vs. the original: every method declared duplicate parameter
    names (SyntaxError); the class and method names are restored to what the
    demo block in this file calls (`SegmentTree`, `update`, `query_range`,
    `traverse`); `root` is now initialized for an empty collection so
    `traverse` cannot raise AttributeError.
    """

    def __init__( self : Optional[Any] , collection : Sequence , function ):
        self.collection = collection
        self.fn = function
        self.root = None  # stays None for an empty collection
        if self.collection:
            self.root = self._build_tree(0 , len(collection ) - 1 )

    def update( self : Optional[int] , i , val ):
        """Set the value at index `i` to `val` and refresh ancestors."""
        self._update_tree(self.root , i , val )

    def query_range( self : Any , i , j ):
        """Combine the stored values over the inclusive index range [i, j]."""
        return self._query_range(self.root , i , j )

    def _build_tree( self : Dict , start , end ):
        # Recursively build nodes; each internal node caches fn(left, right).
        if start == end:
            return SegmentTreeNode(start , end , self.collection[start] )
        mid = (start + end) // 2
        left = self._build_tree(start , mid )
        right = self._build_tree(mid + 1 , end )
        return SegmentTreeNode(start , end , self.fn(left.val , right.val ) , left , right )

    def _update_tree( self : List[str] , node , i , val ):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left , i , val )
        else:
            self._update_tree(node.right , i , val )
        # Recompute this node from its (now up-to-date) children.
        node.val = self.fn(node.left.val , node.right.val )

    def _query_range( self : Optional[Any] , node , i , j ):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range entirely in the left child
                return self._query_range(node.left , i , j )
            # range spans both children
            return self.fn(
                self._query_range(node.left , i , node.mid ) , self._query_range(node.right , node.mid + 1 , j ) , )
        # range entirely in the right child
        return self._query_range(node.right , i , j )

    def traverse( self : List[str] ):
        """Yield the nodes in breadth-first order (empty tree yields nothing)."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root )
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
    import operator

    # Demo: build a tree over [2, 1, 5, 3, 4] for each combine function,
    # print it, set index 1 to 5, and run a few range queries.
    for fn in [operator.add, max, min]:
        print("*" * 50)
        # Bug fix: the original bound the tree to a mangled name (`__A`)
        # but every following statement read `arr`; bind the name the code
        # actually uses.
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()

        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()

        print(arr.query_range(3, 4))  # 7
        print(arr.query_range(2, 2))  # 5
        print(arr.query_range(1, 3))  # 13
        print()
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __lowerCAmelCase ( _UpperCamelCase):
    """Config tester verifying Segformer-specific attributes are present."""

    def _UpperCAmelCase ( self : Dict ):
        # Build a config from the tester's inputs and confirm each required
        # Segformer attribute exists on it.
        config = self.config_class(**self.inputs_dict )
        for required_attr in ("hidden_sizes" , "num_attention_heads" , "num_encoder_blocks"):
            self.parent.assertTrue(hasattr(config , required_attr ) )
class __lowerCAmelCase :
    """Builds tiny Segformer configs/inputs and runs the shape checks used by
    the test class below.

    Bug fixes vs. the original: every method declared duplicate parameter
    names (a SyntaxError) and locals were bound to mangled names; parameter,
    attribute and method names are restored to what the test class in this
    file calls (`prepare_config_and_inputs`, `create_and_check_model`, ...).
    """

    def __init__( self : int , parent , batch_size : Union[str, Any]=13 , image_size : Tuple=64 , num_channels : Optional[int]=3 , num_encoder_blocks : Union[str, Any]=4 , depths : Dict=[2, 2, 2, 2] , sr_ratios : Union[str, Any]=[8, 4, 2, 1] , hidden_sizes : Tuple=[16, 32, 64, 128] , downsampling_rates : Optional[int]=[1, 4, 8, 16] , num_attention_heads : Any=[1, 2, 4, 8] , is_training : Union[str, Any]=True , use_labels : str=True , hidden_act : Dict="gelu" , hidden_dropout_prob : str=0.1 , attention_probs_dropout_prob : List[Any]=0.1 , initializer_range : List[str]=0.02 , num_labels : int=3 , scope : Optional[Any]=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs( self : Optional[int] ):
        """Return (config, pixel_values, labels) with random tiny tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels

    def get_config( self : Tuple ):
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def create_and_check_model( self : Dict , config , pixel_values , labels ):
        model = SegformerModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # The encoder downsamples by the last rate times 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def create_and_check_for_image_segmentation( self : str , config , pixel_values , labels ):
        # presumably the original set num_labels on the config before
        # building the head — the mangled rename dropped the target; confirm.
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def create_and_check_for_binary_image_segmentation( self : int , config , pixel_values , labels ):
        # Single-label (binary) segmentation: loss must still be positive.
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        labels = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(torch_device )
        result = model(pixel_values , labels=labels )
        self.parent.assertGreater(result.loss , 0.0 )

    def prepare_config_and_inputs_for_common( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Common model tests for Segformer (feature extraction, image
    classification, semantic segmentation).

    NOTE(review): method names here look machine-mangled (every method is
    `_UpperCAmelCase`, so unittest would not discover them as tests, and the
    tester/config-tester attributes are read back under their original names
    `model_tester`/`config_tester` after being bound to `A__`). Code left
    byte-identical; flagged for repair.
    """
    __magic_name__ : Dict = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    __magic_name__ : Optional[int] = (
        {
            """feature-extraction""": SegformerModel,
            """image-classification""": SegformerForImageClassification,
            """image-segmentation""": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    __magic_name__ : Dict = True
    __magic_name__ : List[str] = False
    __magic_name__ : Optional[Any] = False
    __magic_name__ : str = False
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # setUp equivalent: build the model tester and the config tester.
        A__ : Union[str, Any] =SegformerModelTester(self )
        A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )
    def _UpperCAmelCase ( self : str ):
        self.config_tester.run_common_tests()
    def _UpperCAmelCase ( self : Dict ):
        A__ : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )
    def _UpperCAmelCase ( self : Tuple ):
        A__ : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )
    def _UpperCAmelCase ( self : Union[str, Any] ):
        A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )
    @unittest.skip("SegFormer does not use inputs_embeds" )
    def _UpperCAmelCase ( self : Dict ):
        pass
    @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
    def _UpperCAmelCase ( self : Tuple ):
        pass
    def _UpperCAmelCase ( self : List[str] ):
        # forward() signature must start with `pixel_values`.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : int =model_class(UpperCamelCase__ )
            A__ : Optional[int] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            A__ : Optional[int] =[*signature.parameters.keys()]
            A__ : List[str] =["pixel_values"]
            self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
    def _UpperCAmelCase ( self : str ):
        # Attention outputs: count and per-layer shapes, via kwargs and config.
        A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : Union[str, Any] =True
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            A__ : Union[str, Any] =False
            A__ : str =True
            A__ : Optional[int] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Any =outputs.attentions
            A__ : List[str] =sum(self.model_tester.depths )
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            A__ : Dict =True
            A__ : str =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Union[str, Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            A__ : Tuple =(self.model_tester.image_size // 32) ** 2
            A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            A__ : int =len(UpperCamelCase__ )
            # Check attention is always last and order is fine
            A__ : Optional[Any] =True
            A__ : Any =True
            A__ : Union[str, Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.attentions
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first attentions (first block, first layer)
            A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
            A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
    def _UpperCAmelCase ( self : List[Any] ):
        # Hidden-state outputs: one per encoder block, first block at 1/4 size.
        def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
            A__ : Optional[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.eval()
            with torch.no_grad():
                A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
            A__ : Optional[Any] =outputs.hidden_states
            A__ : int =self.model_tester.num_encoder_blocks
            self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            A__ : Optional[Any] =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            A__ : str =True
            check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    def _UpperCAmelCase ( self : Optional[int] ):
        # Training smoke test: loss.backward() must run for trainable heads.
        if not self.model_tester.is_training:
            return
        A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
        A__ : List[Any] =True
        for model_class in self.all_model_classes:
            if model_class in get_values(UpperCamelCase__ ):
                continue
            A__ : List[Any] =model_class(UpperCamelCase__ )
            model.to(UpperCamelCase__ )
            model.train()
            A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
            A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
            loss.backward()
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def _UpperCAmelCase ( self : Tuple ):
        pass
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Hub smoke test: load the first pretrained checkpoint.
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )
def lowercase ( ):
    """Load the standard COCO cats fixture image used by the slow tests."""
    return Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # ADE20k b0 checkpoint: verify logits shape and a 3x3x3 slice.
        # NOTE(review): locals are bound to mangled `A__` names but read back
        # as `image_processor`/`model`/`image`/`pixel_values`/`outputs`;
        # left byte-identical, flagged for repair.
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Cityscapes b1 checkpoint: same shape/slice check, looser tolerance.
        # NOTE(review): mangled locals as in the method above; left
        # byte-identical, flagged for repair.
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )
@slow
def _UpperCAmelCase ( self : int ):
# only resize + normalize
A__ : Optional[Any] =SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
UpperCamelCase__ )
A__ : str =prepare_img()
A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
with torch.no_grad():
A__ : Dict =model(UpperCamelCase__ )
A__ : Any =outputs.logits.detach().cpu()
A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
A__ : List[str] =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
A__ : Tuple =torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger and checkpoint map.  Fix: both were assigned to the same
# obfuscated name `__A` (the second assignment shadowed the first), leaving
# neither usable under its conventional name.
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}
class Swin2SRConfig(PretrainedConfig):
    """Configuration for a Swin2SR super-resolution model.

    Fixes: the base class was the undefined placeholder `_UpperCamelCase`
    (restored to the imported `PretrainedConfig`); the `model_type` /
    `attribute_map` class attributes and all `__init__` parameter names were
    obfuscated away, so the body read names that no longer existed.
    """

    model_type = "swin2sr"
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # num_layers is derived from the number of depth entries.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 656 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Helper that builds small configs/inputs for the Flax RoBERTa-PreLayerNorm tests.

    Fixes: the class was obfuscated although L11788 instantiates it by its real
    name; the three prepare_* methods shared one name (shadowing each other)
    while being called by their real names; `__init__` parameter names and all
    locals were placeholders.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Build a small config plus random input tensors."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by the common model tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Return decoder-mode config and inputs (with encoder hidden states)."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model tests for RoBERTa-PreLayerNorm.

    Fixes: the base class was an undefined placeholder (restored to the
    imported `FlaxModelTesterMixin`), the class/method names were obfuscated,
    and the locals in the slow test were placeholders.
    NOTE(review): the boolean class flag was obfuscated; restored as
    `test_head_masking` per the upstream Flax RoBERTa test — confirm.
    """

    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Smoke-test loading every model class from the reference checkpoint."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against the reference checkpoint.

    Fixes: both methods shared one obfuscated name (the second shadowed the
    first, and neither was unittest-discoverable); locals were placeholders;
    the corrupted dtypes `jnp.intaa` / `np.floataa` are restored to
    `jnp.int32` / `np.float32`.
    """

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : str ):
"""simple docstring"""
return " ".join(
"".join(word[::-1] ) if len(UpperCamelCase ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Fix: logger, the key-rename table and the checkpoint whitelist were all
# assigned to the same obfuscated name `__A`, while the functions below
# reference them as `rename_keys_prefix` and `ACCEPTABLE_CHECKPOINTS`.
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a raw torch checkpoint from disk onto CPU and return its state dict.

    Fix: the function was defined under an obfuscated name while
    `convert_visual_bert_checkpoint` calls `load_state_dict`.
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Translate an original VisualBERT state dict into HF naming.

    Drops detector weights, applies the prefix renames, injects the buffered
    `position_ids`, and duplicates the decoder bias that old BERT checkpoints
    lacked.  Fix: locals/targets were obfuscated placeholders, losing the
    `position_ids` and `decoder.bias` destination keys entirely.
    """
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint into HF format and save it.

    Fix: the function was defined under an obfuscated name while the
    `__main__` block calls `convert_visual_bert_checkpoint`, and every local
    (`config_params`, `model_type`, `config`, `state_dict`, `model`, ...) was
    assigned to a placeholder while later lines read the real names.
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
__A : str = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : str , UpperCamelCase : str ):
"""simple docstring"""
A__ : Tuple =len(UpperCamelCase )
A__ : Optional[int] =[]
for i in range(len(UpperCamelCase ) - pat_len + 1 ):
A__ : Optional[Any] =True
for j in range(UpperCamelCase ):
if s[i + j] != pattern[j]:
A__ : Union[str, Any] =False
break
if match_found:
position.append(UpperCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search("ABCDEFG", "DE") == [3]
print(naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC"))
| 656 | """simple docstring"""
__A : Union[str, Any] = {str(digit): digit**5 for digit in range(10)}
def lowercase ( UpperCamelCase : int ):
"""simple docstring"""
return sum(DIGITS_FIFTH_POWER[digit] for digit in str(UpperCamelCase ) )
def lowercase ( ):
"""simple docstring"""
return sum(
number
for number in range(1000 , 1000000 )
if number == digits_fifth_powers_sum(UpperCamelCase ) )
if __name__ == "__main__":
print(solution())
| 656 | 1 |
"""simple docstring"""
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
__A : str = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset; each prompt is yielded `n_copies` times.

    Fixes: the class was obfuscated although it is instantiated as
    `TokenizedDataset` below, the base was the undefined placeholder
    `_UpperCamelCase` (restored to the imported `IterableDataset`), and
    `__init__` parameter names / locals were placeholders.
    """

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        # Default to evaluating every task in the dataset.
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` that stops when every generated sequence in the
    batch contains one of the end-of-function strings.

    Fixes: the class was obfuscated although it is instantiated as
    `EndOfFunctionCriteria` below, the base was the placeholder
    `_UpperCamelCase` (restored to the imported `StoppingCriteria`), and
    `__init__` parameter names / locals were placeholders.
    """

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Return True when all sequences contain an end-of-function marker
        somewhere after the prompt (`start_length`)."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(code):
    """Remove the last (possibly truncated) block of the generated code.

    Splits on the end-of-function markers and drops everything from the final
    marker onward.  Fixes: the function was defined under an obfuscated name
    while `complete_code` calls `remove_last_block`, and the obfuscation made
    it split the string on a pattern built from the string itself instead of
    the stop-word list.
    """
    # Same markers as the module-level EOF_STRINGS stop words.
    eof_strings = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
    string_list = re.split("(%s)" % "|".join(eof_strings), code)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate `batch_size` candidate completions per HumanEval task.

    Fixes: the function was defined under an obfuscated name while `main`
    calls `complete_code`, parameter names and locals were placeholders, and
    the per-batch update of the stopping criterion's `start_length` was lost.
    """
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            # The stopping criterion must only look at tokens generated after
            # this batch's prompt.
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    """Run HumanEval generation + `code_eval` scoring with accelerate.

    Fixes: the function was defined under an obfuscated name while the guard
    below calls `main()`, every local was assigned to a placeholder while the
    following lines read the real names, and the two `os.environ` writes were
    lost entirely.
    """
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f'check({human_eval["test"][task]["entry_point"]})'
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
    main()
| 656 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# Fix: the logger, docstring constants and archive list were all assigned to
# the same obfuscated name `__A` (each shadowing the previous), while
# `add_code_sample_docstrings` below references `_CONFIG_FOR_DOC`,
# `_CHECKPOINT_FOR_DOC` and `_EXPECTED_OUTPUT_SHAPE`.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (stochastic depth) per sample, for the main path of residual blocks.

    Returns the input unchanged when `drop_prob == 0` or in eval mode;
    otherwise zeroes whole samples with probability `drop_prob` and rescales
    the survivors by `1 / keep_prob` so the expectation is preserved.
    Fix: the function was defined under an obfuscated name while
    `PoolFormerDropPath.forward` calls `drop_path`, and its locals were
    placeholders.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.

    Fixes: the class was obfuscated although `PoolFormerOutput` / 
    `PoolFormerLayer` instantiate it as `PoolFormerDropPath`, and the forward
    method was not named `forward`, so `nn.Module.__call__` could not dispatch
    to it.
    """

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in repr() so the configured drop probability is visible.
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings via a strided convolution.

    Fixes: the class was obfuscated although the encoder instantiates it by
    keyword as `PoolFormerEmbeddings(...)`, the constructor parameters were
    unnamed placeholders, the forward method was not named `forward`, and the
    corrupted `nn.Convad` is restored to `nn.Conv2d`.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either an int or an explicit (h, w) pair for each geometry arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with exactly one group; input shape [B, C, *].

    Fix: the class was obfuscated although the rest of the file instantiates
    it as `PoolFormerGroupNorm`.
    """

    def __init__(self, num_channels, **kwargs):
        # A single group normalizes over the whole channel dimension.
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """PoolFormer token mixer: average pooling minus the identity.

    Fixes: the class was obfuscated although `PoolFormerLayer` instantiates it
    as `PoolFormerPooling`, the corrupted `nn.AvgPoolad` is restored to
    `nn.AvgPool2d`, and the forward method was not named `forward`.
    """

    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so the block learns only the pooling residual.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """PoolFormer MLP block implemented with two 1x1 convolutions.

    Fixes: the class was obfuscated although `PoolFormerLayer` instantiates it
    as `PoolFormerOutput`, constructor parameters were unnamed placeholders,
    the corrupted `nn.Convad` is restored to `nn.Conv2d`, and the forward
    method was not named `forward`.
    """

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # `ACTaFN` is this file's imported activation-name lookup table.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling mixer + conv-MLP, each with a residual.

    Fixes: the class was obfuscated although the encoder instantiates it by
    keyword as `PoolFormerLayer(...)`, the constructor parameters were unnamed
    placeholders, the two layer-scale parameters shared one name (the second
    shadowed the first), and the forward method was not named `forward`.
    """

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Stack of patch-embedding stages, each followed by PoolFormer blocks.

    Fixes: the class was obfuscated although `PoolFormerModel` instantiates it
    as `PoolFormerEncoder`, locals were assigned to placeholders while later
    lines read the real names, and the forward method was not named `forward`.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """Abstract base handling weight initialization and pretrained loading.

    Fixes: the class was obfuscated although `PoolFormerModel` subclasses it,
    the class attributes lost their required names (`config_class`,
    `base_model_prefix`, ...), the hook methods were not named
    `_init_weights` / `_set_gradient_checkpointing` (so `PreTrainedModel`
    would never call them), and the corrupted `nn.Convad` is restored to
    `nn.Conv2d`.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize conv/linear weights from N(0, initializer_range); reset norms."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
__A : Optional[int] = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
__A : Dict = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """Bare PoolFormer backbone: config plus stage-wise encoder, no task head.

    NOTE(review): several sites below pass the obfuscated name `UpperCamelCase__`
    where the original code used locals (`config`, `pixel_values`,
    `sequence_output`, ...), and the class-level decorator argument was
    presumably the inputs docstring constant -- verify against the upstream
    source before executing.
    """

    def __init__( self : List[str] , UpperCamelCase__ : Dict ):
        super().__init__(UpperCamelCase__ )
        # Keep the config and build the stage-wise encoder.
        A__ : List[Any] =config
        A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ )
        # Initialize weights and apply final processing
        self.post_init()

    def _UpperCAmelCase ( self : Tuple ):
        # get_input_embeddings-style accessor for the first patch-embedding module.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        """Forward pass: delegate to the encoder and wrap its output.

        Returns a `BaseModelOutputWithNoAttention` when `return_dict` is truthy,
        otherwise a plain tuple `(sequence_output, None, *extras)`.
        """
        # Fall back to config defaults when the flags are not given explicitly.
        A__ : int =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        A__ : List[Any] =self.encoder(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : int =encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class __lowerCAmelCase ( nn.Module):
    """Final pooler: a single hidden_size -> hidden_size linear projection
    applied to the model's hidden states."""

    def __init__( self : Dict , config ):
        super().__init__()
        # Fix: the original stored the layer in a throwaway local (`A__`) and
        # read the undefined name `config`; the layer must live on `self` for
        # the forward pass (which reads `self.dense`) to work.
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )

    def _UpperCAmelCase ( self : Optional[Any] , hidden_states ):
        """Project the hidden states through the dense layer.

        Fix: the original assigned the projection to a local and then returned
        the undefined name `output`.
        """
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    """PoolFormer with a linear classification head over the mean-pooled,
    group-normalised final feature map.

    NOTE(review): obfuscated `UpperCamelCase__` arguments stand in for the
    original local variables throughout -- verify against the upstream source
    before executing.
    """

    def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
        super().__init__(UpperCamelCase__ )
        A__ : List[str] =config.num_labels
        A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
        # Final norm
        A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        A__ : Dict =(
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        """Classification forward pass.

        Computes logits from the pooled features and, when labels are given,
        the loss matching `config.problem_type` (regression / single-label /
        multi-label).
        """
        A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
        A__ : List[str] =self.poolformer(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : str =outputs[0]
        # Global average pool over the two spatial dims, then classify.
        A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
        A__ : Optional[Any] =None
        if labels is not None:
            # Infer the problem type once if the config does not pin it down.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A__ : int ="regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A__ : Tuple ="single_label_classification"
                else:
                    A__ : Optional[int] ="multi_label_classification"
            if self.config.problem_type == "regression":
                A__ : Dict =MSELoss()
                if self.num_labels == 1:
                    A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
            elif self.config.problem_type == "single_label_classification":
                A__ : Tuple =CrossEntropyLoss()
                A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A__ : List[Any] =BCEWithLogitsLoss()
                A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            A__ : Optional[int] =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
| 656 | 1 |
"""simple docstring"""
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __lowerCAmelCase :
    """Model tester for the TF EfficientFormer suite: builds a tiny config and
    random pixel inputs, and runs shape checks for the base model and the
    image-classification head.

    NOTE(review): the constructor's right-hand sides read descriptive names
    (`parent`, `batch_size`, ...) while the parameters are obfuscated; each
    line was presumably ``self.<name> = <name>``. Verify before running.
    """

    def __init__( self : int , UpperCamelCase__ : Dict , UpperCamelCase__ : int = 13 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 3 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : int = 128 , UpperCamelCase__ : Optional[int]=[16, 32, 64, 128] , UpperCamelCase__ : int = 7 , UpperCamelCase__ : int = 4 , UpperCamelCase__ : int = 37 , UpperCamelCase__ : str = "gelu" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : int = 128 , UpperCamelCase__ : List[int] = [2, 2, 2, 2] , UpperCamelCase__ : int = 2 , UpperCamelCase__ : int = 2 , ):
        A__ : Tuple =parent
        A__ : Dict =batch_size
        A__ : Tuple =image_size
        A__ : Tuple =patch_size
        A__ : Union[str, Any] =num_channels
        A__ : int =is_training
        A__ : List[str] =use_labels
        A__ : List[Any] =hidden_size
        A__ : Union[str, Any] =num_hidden_layers
        A__ : Dict =num_attention_heads
        A__ : Dict =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Optional[Any] =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Optional[Any] =type_sequence_label_size
        A__ : str =initializer_range
        A__ : Optional[int] =encoder_stride
        A__ : Optional[Any] =num_attention_outputs
        A__ : List[Any] =embed_dim
        # Sequence length is the embedding dim plus the distillation token.
        A__ : str =embed_dim + 1
        A__ : int =resolution
        A__ : Any =depths
        A__ : List[str] =hidden_sizes
        A__ : Any =dim
        A__ : str =mlp_expansion_ratio

    def _UpperCAmelCase ( self : str ):
        """Build (config, pixel_values, labels) for one test run."""
        A__ : Any =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        A__ : Dict =None
        if self.use_labels:
            A__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        A__ : Any =self.get_config()
        return config, pixel_values, labels

    def _UpperCAmelCase ( self : List[Any] ):
        """Return a small EfficientFormerConfig built from the tester fields."""
        return EfficientFormerConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ):
        """Check the base model output shape."""
        A__ : List[str] =TFEfficientFormerModel(config=UpperCamelCase__ )
        A__ : Optional[int] =model(UpperCamelCase__ , training=UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ):
        """Check the classification head output shape, including greyscale input."""
        A__ : Optional[int] =self.type_sequence_label_size
        A__ : Dict =TFEfficientFormerForImageClassification(UpperCamelCase__ )
        A__ : Optional[int] =model(UpperCamelCase__ , labels=UpperCamelCase__ , training=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        A__ : int =1
        A__ : Optional[Any] =TFEfficientFormerForImageClassification(UpperCamelCase__ )
        A__ : List[str] =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        A__ : Optional[Any] =model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _UpperCAmelCase ( self : Optional[int] ):
        """Split prepared inputs into (config, inputs_dict) for the common tests."""
        A__ : Any =self.prepare_config_and_inputs()
        A__ , A__ , A__ : List[str] =config_and_inputs
        A__ : List[str] ={"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
'''simple docstring'''
__magic_name__ : Dict = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__magic_name__ : List[str] = (
{
"""feature-extraction""": TFEfficientFormerModel,
"""image-classification""": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__magic_name__ : str = False
__magic_name__ : Tuple = False
__magic_name__ : Any = False
__magic_name__ : Any = False
__magic_name__ : List[str] = False
def _UpperCAmelCase ( self : Tuple ):
A__ : Tuple =TFEfficientFormerModelTester(self )
A__ : List[str] =ConfigTester(
self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _UpperCAmelCase ( self : List[Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def _UpperCAmelCase ( self : str ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def _UpperCAmelCase ( self : Tuple ):
pass
def _UpperCAmelCase ( self : List[str] ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : List[Any] =model_class(UpperCamelCase__ )
A__ : Optional[int] =inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Union[str, Any] =[*signature.parameters.keys()]
A__ : List[str] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[Any] ):
def check_hidden_states_output(UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
A__ : int =model_class(UpperCamelCase__ )
A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ : str =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ : List[Any] =getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
if hasattr(self.model_tester , "encoder_seq_length" ):
A__ : Optional[int] =self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
A__ : Any =seq_length * self.model_tester.chunk_length
else:
A__ : Union[str, Any] =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
A__ : Optional[int] =outputs.decoder_hidden_states
self.asseretIsInstance(UpperCamelCase__ , (list, tuple) )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
A__ : Dict =getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
A__ : str =getattr(self.model_tester , "decoder_seq_length" , UpperCamelCase__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[int] =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : Optional[int] =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=False ):
A__ : Union[str, Any] =super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _UpperCAmelCase ( self : Optional[Any] ):
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def _UpperCAmelCase ( self : List[str] ):
A__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase__ )
def _UpperCAmelCase ( self : List[Any] ):
A__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCamelCase__ )
@slow
def _UpperCAmelCase ( self : Any ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : int =TFEfficientFormerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _UpperCAmelCase ( self : Any ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] =True
A__ : List[str] =getattr(self.model_tester , "seq_length" , UpperCamelCase__ )
A__ : Optional[Any] =getattr(self.model_tester , "encoder_seq_length" , UpperCamelCase__ )
A__ : Dict =getattr(self.model_tester , "key_length" , UpperCamelCase__ )
A__ : List[str] =getattr(self.model_tester , "chunk_length" , UpperCamelCase__ )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
A__ : Any =encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
A__ : Any =True
A__ : str =False
A__ : Optional[int] =True
A__ : Optional[Any] =model_class(UpperCamelCase__ )
A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ : Optional[Any] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Any =True
A__ : Dict =model_class(UpperCamelCase__ )
A__ : Union[str, Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) , training=UpperCamelCase__ )
A__ : List[str] =outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _UpperCAmelCase ( self : Tuple ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
A__ , A__ : Any =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
A__ : Optional[Any] =model_class(UpperCamelCase__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
A__ : Optional[Any] ={
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=UpperCamelCase__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
A__ : Tuple =model(UpperCamelCase__ )
self.assertTrue(outputs_dict is not None )
def lowercase ( ):
    """Load the standard COCO test image used by the integration tests.

    Fix: the original assigned the opened image to a throwaway local (`A__`)
    and then returned the undefined name `image`.

    Returns:
        PIL.Image.Image: the test fixture image.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_tf
@require_vision
class __lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests: run the pretrained EfficientFormer-L1 checkpoints
    on a real image and compare the first logits against reference values."""

    @cached_property
    def _UpperCAmelCase ( self : int ):
        # Image processor for the l1-300 checkpoint (None when vision deps are missing).
        return (
            EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
            if is_vision_available()
            else None
        )

    @slow
    def _UpperCAmelCase ( self : Dict ):
        """Plain image-classification head: check logit shape and first values."""
        A__ : Dict =TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
        A__ : List[str] =self.default_image_processor
        A__ : Optional[Any] =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="tf" )
        # forward pass
        A__ : List[Any] =model(**UpperCamelCase__ , training=UpperCamelCase__ )
        # verify the logits
        A__ : Optional[Any] =tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        # Reference values captured from a known-good run of this checkpoint.
        A__ : List[Any] =tf.constant([-0.0555, 0.4825, -0.0852] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        """Teacher-distilled head: same checks with its own reference logits."""
        A__ : Dict =TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
            "snap-research/efficientformer-l1-300" )
        A__ : Optional[Any] =self.default_image_processor
        A__ : Optional[Any] =prepare_img()
        A__ : int =image_processor(images=UpperCamelCase__ , return_tensors="tf" )
        # forward pass
        A__ : Optional[Any] =model(**UpperCamelCase__ , training=UpperCamelCase__ )
        # verify the logits
        A__ : Tuple =tf.TensorShape((1, 1000) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : Any =tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase):
    """Fast pipeline tests for `IFInpaintingSuperResolutionPipeline`, driven by the
    shared pipeline/IF tester mixins.

    NOTE(review): the class attributes all share the obfuscated name
    `__magic_name__`; they were presumably `pipeline_class`, `params`,
    `batch_params` and `required_optional_params`. The dummy-input method also
    reads locals (`image`, `generator`, ...) that were collapsed into `A__`
    assignments -- verify against the upstream source before running.
    """

    __magic_name__ : int = IFInpaintingSuperResolutionPipeline
    __magic_name__ : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    __magic_name__ : str = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"""original_image"""})
    __magic_name__ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Dummy super-resolution components come from IFPipelineTesterMixin.
        return self._get_superresolution_dummy_components()

    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int]=0 ):
        """Build deterministic dummy inputs (image / original_image / mask_image)."""
        # MPS generators cannot be device-constructed; seed globally there.
        if str(UpperCamelCase__ ).startswith("mps" ):
            A__ : Any =torch.manual_seed(UpperCamelCase__ )
        else:
            A__ : Dict =torch.Generator(device=UpperCamelCase__ ).manual_seed(UpperCamelCase__ )
        A__ : Tuple =floats_tensor((1, 3, 16, 16) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Optional[int] =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCamelCase__ ) ).to(UpperCamelCase__ )
        A__ : List[str] ={
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
    def _UpperCAmelCase ( self : Dict ):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )

    def _UpperCAmelCase ( self : int ):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
    def _UpperCAmelCase ( self : Tuple ):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1 )

    def _UpperCAmelCase ( self : str ):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )

    def _UpperCAmelCase ( self : Dict ):
        self._test_save_load_local()

    def _UpperCAmelCase ( self : Optional[int] ):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 656 | 1 |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ):
A__ : Optional[Any] =""
A__ : Optional[Any] =""
A__ : Dict =[]
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ):
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
A__ : int =self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
A__ : Tuple =self.__min_dist_top_down_dp(UpperCamelCase__ , n - 1 )
A__ : Dict =self.__min_dist_top_down_dp(m - 1 , UpperCamelCase__ )
A__ : Any =self.__min_dist_top_down_dp(m - 1 , n - 1 )
A__ : Optional[int] =1 + min(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self.dp[m][n]
def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : str ):
A__ : Optional[Any] =worda
A__ : Tuple =worda
A__ : Any =[[-1 for _ in range(len(UpperCamelCase__ ) )] for _ in range(len(UpperCamelCase__ ) )]
return self.__min_dist_top_down_dp(len(UpperCamelCase__ ) - 1 , len(UpperCamelCase__ ) - 1 )
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : str ):
A__ : Dict =worda
A__ : str =worda
A__ : Dict =len(UpperCamelCase__ )
A__ : List[str] =len(UpperCamelCase__ )
A__ : Optional[Any] =[[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
A__ : Union[str, Any] =j
elif j == 0: # second string is empty
A__ : Union[str, Any] =i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
A__ : List[str] =self.dp[i - 1][j - 1]
else:
A__ : Optional[Any] =self.dp[i][j - 1]
A__ : Tuple =self.dp[i - 1][j]
A__ : List[Any] =self.dp[i - 1][j - 1]
A__ : Optional[Any] =1 + min(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return self.dp[m][n]
if __name__ == "__main__":
__A : str = EditDistance()
print("****************** Testing Edit Distance DP Algorithm ******************")
print()
__A : str = input("Enter the first string: ").strip()
__A : List[Any] = input("Enter the second string: ").strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__A : Any = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Union[str, Any] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : Optional[int] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__A : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__A : Any = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class __lowerCAmelCase ( _UpperCamelCase):
    """Seq2seq-specific training arguments: adds sortish sampling,
    generation-based evaluation, and generation overrides on top of
    `TrainingArguments`.

    NOTE(review): every field below carries the obfuscated name
    `__magic_name__`, so in a real dataclass only the last assignment would
    survive; the originals were presumably `sortish_sampler`,
    `predict_with_generate`, `generation_max_length`, `generation_num_beams`
    and `generation_config` -- restore when de-obfuscating.
    """
    __magic_name__ : bool = field(default=_UpperCamelCase , metadata={"""help""": """Whether to use SortishSampler or not."""})
    __magic_name__ : bool = field(
        default=_UpperCamelCase , metadata={"""help""": """Whether to use generate to calculate generative metrics (ROUGE, BLEU)."""})
    __magic_name__ : Optional[int] = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `max_length` value of the model configuration."""
            )
        } , )
    __magic_name__ : Optional[int] = field(
        default=_UpperCamelCase , metadata={
            """help""": (
                """The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default """
                """to the `num_beams` value of the model configuration."""
            )
        } , )
    __magic_name__ : Optional[Union[str, Path, GenerationConfig]] = field(
        default=_UpperCamelCase , metadata={
            """help""": """Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."""
        } , )

    def _UpperCAmelCase ( self : int ):
        """Serialise to a plain dict, flattening nested config values to dicts.

        NOTE(review): the loop reads `d`, which is never bound here -- the
        obfuscated `A__` assignment was presumably ``d = super().to_dict()``;
        verify before running.
        """
        A__ : Any =super().to_dict()
        for k, v in d.items():
            if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
                A__ : Any =v.to_dict()
        return d
| 656 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def lowercase ( scheduler , num_steps=10 ):
    """Record the learning rate over `num_steps` scheduler steps.

    Fix: the original declared two parameters both named `UpperCamelCase`
    (a SyntaxError in Python) and its body read the undefined names
    `scheduler` / `lrs`; parameters are renamed to what the body uses.

    Args:
        scheduler: any object exposing `get_lr() -> list` and `step()`.
        num_steps (int): number of steps to record.

    Returns:
        list: the first learning rate observed before each step.
    """
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def lowercase ( scheduler , num_steps=10 ):
    """Record learning rates like `unwrap_schedule`, but additionally round-trip
    the scheduler state through `torch.save`/`torch.load` halfway through, to
    exercise state (de)serialisation.

    Fix: the original declared two parameters both named `UpperCamelCase`
    (a SyntaxError in Python) and its body read undefined names; parameters
    are renamed to what the body uses.

    Args:
        scheduler: object exposing `get_lr()`, `step()`, `state_dict()` and
            `load_state_dict(d)`.
        num_steps (int): number of steps to record.

    Returns:
        list: the first learning rate observed before each step.
    """
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            # Save and immediately restore the scheduler state mid-run.
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , "schedule.bin" )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Optimizer smoke tests: AdamW and Adafactor should drive a single weight
    vector to a fixed target under MSE loss.

    NOTE(review): several keyword values below are the obfuscated name
    `UpperCamelCase__` (undefined at this scope); in the upstream file they
    were concrete literals (e.g. ``requires_grad=True``, ``beta1=None``) --
    verify before running.
    """

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : int ):
        # Element-wise almost-equality of two equal-length sequences.
        self.assertEqual(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) )
        for a, b in zip(UpperCamelCase__ , UpperCamelCase__ ):
            self.assertAlmostEqual(UpperCamelCase__ , UpperCamelCase__ , delta=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple ):
        """AdamW converges w -> target within 100 steps."""
        A__ : Any =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Optional[Any] =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Any =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : List[str] =AdamW(params=[w] , lr=2E-1 , weight_decay=0.0 )
        for _ in range(100 ):
            A__ : Optional[int] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )

    def _UpperCAmelCase ( self : Dict ):
        """Adafactor converges w -> target within 1000 steps."""
        A__ : Optional[int] =torch.tensor([0.1, -0.2, -0.1] , requires_grad=UpperCamelCase__ )
        A__ : Dict =torch.tensor([0.4, 0.2, -0.5] )
        A__ : Optional[int] =nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        A__ : int =Adafactor(
            params=[w] , lr=1E-2 , eps=(1E-30, 1E-3) , clip_threshold=1.0 , decay_rate=-0.8 , betaa=UpperCamelCase__ , weight_decay=0.0 , relative_step=UpperCamelCase__ , scale_parameter=UpperCamelCase__ , warmup_init=UpperCamelCase__ , )
        for _ in range(1000 ):
            A__ : List[Any] =criterion(UpperCamelCase__ , UpperCamelCase__ )
            loss.backward()
            optimizer.step()
            w.grad.detach_() # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1E-2 )
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """End-to-end checks for the learning-rate scheduler factory functions."""

    # Shared fixture: a tiny model/optimizer pair and the number of scheduler
    # steps each expectation list below covers.
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        """Element-wise almost-equal assertion with an optional failure message."""
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )
            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class __lowerCAmelCase :
    """Picklable wrapper around a scheduler's ``lr_lambda`` functions.

    NOTE(review): the bodies below look machine-garbled -- ``__init__`` binds a
    local ``A__`` instead of ``self.fn``, and the classmethod references an
    undefined ``scheduler`` and drops its result.  The class also appears
    truncated at this point in the file; confirm against the original test
    utility before relying on it.
    """
    def __init__( self : int , UpperCamelCase__ : str ):
        # NOTE(review): presumably meant ``self.fn = fn`` -- as written, ``fn``
        # is undefined and nothing is stored on the instance.
        A__ : int =fn
    def __call__( self : List[Any] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ):
        # Delegate the call to the wrapped function.
        return self.fn(*UpperCamelCase__ , **UpperCamelCase__ )
    @classmethod
    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict ):
        # Wrap each of the scheduler's lr_lambdas in this class.
        # NOTE(review): ``scheduler`` is undefined here (presumably the
        # ``UpperCamelCase__`` argument) and the wrapped list is never assigned
        # back onto the scheduler -- looks truncated/garbled.
        A__ : str =list(map(self , scheduler.lr_lambdas ) )
| 656 | 1 |
"""simple docstring"""
def lowercase(profit: list, weight: list, max_weight: int):
    """Solve the fractional knapsack problem greedily.

    Items are taken in decreasing profit/weight ratio; the last item may be
    taken fractionally to fill the remaining capacity.

    :param profit: profit of each item (non-negative numbers)
    :param weight: weight of each item (non-negative numbers, same length)
    :param max_weight: knapsack capacity (must be > 0)
    :return: the maximum achievable profit (may be fractional)
    :raises ValueError: on mismatched lengths, non-positive capacity, or
        negative profits/weights
    """
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")
    # Profit gained per 1 unit of weight for each item.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]
    # Sorted copy, ascending; we consume it from the back (best ratio first).
    sorted_profit_by_weight = sorted(profit_by_weight)
    length = len(sorted_profit_by_weight)
    limit = 0  # weight placed in the knapsack so far
    gain = 0  # profit accumulated so far
    i = 0
    # Loop until the knapsack is full or every item has been considered.
    while limit <= max_weight and i < length:
        # Greatest remaining profit/weight ratio.
        biggest_ratio = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_ratio)
        # Mark the item as consumed so equal ratios resolve to the next item.
        profit_by_weight[index] = -1
        if max_weight - limit >= weight[index]:
            # The whole item fits: take it completely.
            limit += weight[index]
            gain += 1 * profit[index]
        else:
            # Only part of the item fits: take the matching fraction and stop.
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


# Backward-compatible alias: the CLI driver below calls ``calc_profit``.
calc_profit = lowercase
if __name__ == "__main__":
    # Interactive driver: read the three inputs, solve, and show the result.
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
    # Function Call
    print(lowercase(profit, weight, max_weight))
| 656 | """simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq -> HF name maps, one per sub-module of the SpeechT5 graph.
# A "*" in a key stands for a layer index and is substituted during loading.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task maps: speech-to-text, text-to-speech, speech-to-speech.
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys that have no HF counterpart and are skipped for every task.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the HF model.

    Walks ``key`` (a dotted attribute path) from ``hf_pointer``, verifies that
    the target's shape matches ``value``, and writes ``value`` into the
    attribute selected by ``weight_type`` (or into the module itself when
    ``weight_type`` is None).

    :raises ValueError: when the destination shape does not match ``value``.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' )
def should_ignore(name, ignore_keys):
    """Return True if ``name`` matches any pattern in ``ignore_keys``.

    Patterns may end with ``.*`` (prefix match), contain ``.*.`` (both the
    prefix and the suffix must occur in ``name``), or be a plain substring.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def recursively_load_weights(fairseq_dict, hf_model, task):
    """Map every tensor of a fairseq SpeechT5 state dict onto ``hf_model``.

    ``task`` selects the name mapping and ignore list ("s2t", "t2s" or "s2s").
    Conv feature-extractor weights go through ``load_conv_layer``; all other
    tensors through ``set_recursively``.  Names that match nothing are
    collected and logged as a warning at the end.

    :raises ValueError: for an unsupported ``task``.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        mapping = MAPPING_S2T
        ignore_keys = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None  # t2s has no speech-encoder prenet to fill
        mapping = MAPPING_T2S
        ignore_keys = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        mapping = MAPPING_S2S
        ignore_keys = IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, ignore_keys):
            logger.info(F'''{name} was ignored''' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in mapping.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the fairseq name.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one conv feature-extractor tensor into ``feature_extractor``.

    ``full_name`` encodes ``<layer_id>.<type_id>.<param>`` after
    ``conv_layers.``: type 0 is the conv itself, type 2 is its layer norm
    (only layer 0 when group norm is used).  Unknown names are appended to
    ``unused_weights``.

    :raises ValueError: when the destination shape does not match ``value``.
    """
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speechta_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None, ):
    """Convert a fairseq SpeechT5 checkpoint to the HF format.

    Builds the task-specific model, loads the fairseq weights into it, saves
    the processor and model to ``pytorch_dump_folder_path``, and optionally
    pushes both to the hub under ``repo_id``.

    :raises ValueError: for an unknown ``task``.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    # NOTE(review): ``tokenizer`` is only defined when ``vocab_path`` is given,
    # mirroring the original script; the processor below needs it.
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    # CLI entry point: parse the conversion arguments and run the converter.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 656 | 1 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __lowerCAmelCase ( ABC):
    """Abstract base class for CLI subcommands.

    Subclasses register their argument parser via the static abstract method
    and implement the instance-level run method.

    NOTE(review): both abstract methods below carry the same (garbled) name,
    so the second definition shadows the first in the class namespace --
    consider restoring the original distinct names.
    """
    @staticmethod
    @abstractmethod
    def _UpperCAmelCase ( UpperCamelCase__ : ArgumentParser ):
        # Register this command's arguments on the CLI's (sub)parser.
        raise NotImplementedError()
    @abstractmethod
    def _UpperCAmelCase ( self : Any ):
        # Execute the command.
        raise NotImplementedError()
| 656 | """simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
    """GPT-2-based text decoder that conditions generation on a projected
    "prefix" embedding.

    Builds a ``GPTaLMHeadModel`` from a ``GPTaConfig`` and, when
    ``prefix_hidden_dim`` is set, projects prefix features through a pair of
    linear encode/decode layers before prepending them to the token
    embeddings.  Decoding uses beam search only (see the note in the generate
    method).

    NOTE(review): the base-class names and most parameter/local names in this
    class look machine-garbled (``_UpperCamelCase``, ``UpperCamelCase__``,
    ``A__``); the code below is documented as-is and should be restored
    against the original module before modification.
    """
    # Parameter name patterns excluded when saving/loading (GPT-2 attention
    # bias buffers).
    __magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        super().__init__()
        A__ : Dict =prefix_length
        # A hidden projection is mandatory whenever the prefix feature size
        # differs from the GPT-2 embedding size.
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim
        # encode/decode projections collapse to identity when no hidden dim
        # is configured.
        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )
    # Forward pass: embed the tokens, encode+decode the prefix, prepend it to
    # the token embeddings and run the LM head.  Returns (out, hidden) when a
    # prefix hidden projection is configured, otherwise just the LM output.
    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
        A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
        A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
        A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
        A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )
        if labels is not None:
            # Prepend zero "dummy" labels so the label tensor lines up with
            # the prefix positions.
            A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
            A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
        A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    # Zero label block covering the prefix positions.
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
        return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )
    # Thin wrapper over the prefix encoder projection.
    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
        return self.encode_prefix(UpperCamelCase__ )
    # Generate token sequences for a batch of prefix features, one feature at
    # a time, via beam search.
    @torch.no_grad()
    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
        A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
        A__ : List[str] =[]
        A__ : Dict =[]
        for feature in features:
            A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature
            # Only support beam search for now
            A__ , A__ : Optional[Any] =self.generate_beam(
                input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
            generated_tokens.append(output_tokens[0] )
            generated_seq_lengths.append(seq_lengths[0] )
        A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
        A__ : Optional[int] =torch.stack(UpperCamelCase__ )
        return generated_tokens, generated_seq_lengths
    # Beam search over the GPT-2 decoder.  Tracks per-beam cumulative scores,
    # sequence lengths and stop flags; returns beams sorted by average score.
    @torch.no_grad()
    def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
        A__ : str =eos_token_id
        A__ : Optional[Any] =None
        A__ : int =None
        A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
        A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
        if input_embeds is not None:
            A__ : Union[str, Any] =input_embeds
        else:
            A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )
        for i in range(UpperCamelCase__ ):
            A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
            A__ : Tuple =outputs.logits
            A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            A__ : Optional[Any] =logits.softmax(-1 ).log()
            if scores is None:
                # First step: seed the beams from the top-k tokens.
                A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
                A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
                A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
                if tokens is None:
                    A__ : str =next_tokens
                else:
                    A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
                    A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
            else:
                # Subsequent steps: extend each live beam, re-rank all
                # candidate continuations by length-averaged score, keep top-k.
                A__ : Union[str, Any] =-float(np.inf )
                A__ : Dict =0
                A__ : Optional[Any] =scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
                A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
                A__ : Tuple =next_tokens // scores_sum.shape[1]
                A__ : List[Any] =seq_lengths[next_tokens_source]
                A__ : int =next_tokens % scores_sum.shape[1]
                A__ : str =next_tokens.unsqueeze(1 )
                A__ : List[Any] =tokens[next_tokens_source]
                A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
                A__ : List[str] =generated[next_tokens_source]
                A__ : Optional[Any] =scores_sum_average * seq_lengths
                A__ : Optional[int] =is_stopped[next_tokens_source]
            # Feed the chosen tokens' embeddings back in and update stop flags.
            A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
            A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
            A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
            if is_stopped.all():
                break
        A__ : Optional[int] =scores / seq_lengths
        A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
        # tokens tensors are already padded to max_seq_length
        A__ : int =[tokens[i] for i in order]
        A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
        A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
        return output_texts, seq_lengths
| 656 | 1 |
"""simple docstring"""
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
# Default to GPU when one is available; used by run_generate below.
DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples,
    out_file,
    model_name,
    batch_size=8,
    device=DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
):
    """Run seq2seq generation over ``examples`` and write one hypothesis per line.

    :param examples: list of source texts
    :param out_file: path the hypotheses are written to
    :param model_name: HF model id or local path
    :param batch_size: examples per generation batch
    :param device: torch device string
    :param fp16: cast the model to half precision
    :param task: used to pull task-specific generation params from the config
    :param prefix: optional string prepended to every example
    :return: dict with ``n_obs``, ``runtime`` (s) and ``seconds_per_sample``
    """
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(F'''Inferred tokenizer type: {tokenizer.__class__}''' )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``."""
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    """Parse CLI args, run generation, and (optionally) score the output.

    Unknown ``--key=value`` CLI args are forwarded to ``model.generate``.
    Returns the metrics dict, or ``{}`` when no reference file was given.

    :param verbose: print parsed generate kwargs and the final scores
    :raises ValueError: when ``--fp16`` is combined with ``--device cpu``
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples")
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all.")
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info", nargs="?", type=str, const=datetime_now(), help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ), )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(F'''parsed the following generate kwargs: {parsed_args}''' )
    # T5 models expect a leading space on each source line.
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(F'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)
    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info
    if verbose:
        print(scores)
    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))
    return scores
if __name__ == "__main__":
    # Script entry point: parse CLI args, run generation, optionally score it.
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    run_generate(verbose=True)
| 656 | """simple docstring"""
import os
def lowercase(data_file_path=None):
    """Solve the maximum path sum problem for a triangle of integers.

    Reads a triangle (one row per line, numbers separated by single spaces)
    and returns the largest sum obtainable walking from the apex to the base,
    moving to an adjacent number on each row.

    Args:
        data_file_path: Optional path to the triangle file.  When ``None``
            (the default, preserving the original behavior), ``triangle.txt``
            next to this script is used.

    Returns:
        The maximum top-to-bottom path sum as an ``int``.
    """
    if data_file_path is None:
        # Bug fix: the original passed an undefined name to os.path.realpath;
        # the intended argument is __file__ (the script's own location).
        script_dir = os.path.dirname(os.path.realpath(__file__))
        data_file_path = os.path.join(script_dir, "triangle.txt")
    with open(data_file_path) as f:
        triangle = f.readlines()
    # Parse each line into a row of ints.
    a = [[int(number) for number in line.strip().split(" ")] for line in triangle]
    # Dynamic programming: fold the best achievable path sum down row by row.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            # Contribution from directly above (0 when j is past the end of
            # the previous, shorter row) and from above-left (0 at j == 0).
            above = a[i - 1][j] if j != len(a[i - 1]) else 0
            above_left = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(above, above_left)
    return max(a[-1])


if __name__ == "__main__":
    # Bug fix: the guard previously called an undefined ``solution()``.
    print(lowercase())
| 656 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A : Optional[Any] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase(BaseImageProcessor):
    """CLIP-style image processor.

    Optionally converts images to RGB, resizes the shortest edge, center-crops,
    rescales, and normalizes them, returning a ``BatchFeature`` holding
    ``pixel_values``.

    NOTE(review): the original block subclassed the undefined placeholder
    ``_UpperCamelCase`` and declared every parameter as ``UpperCamelCase__``
    (a SyntaxError).  The base class and parameter/method names below are
    restored from the imports and from how the body uses them (e.g. the body
    calls ``self.resize`` / ``self.center_crop`` / ``self.rescale`` /
    ``self.normalize``).
    """

    # Name of the tensor this processor produces for the model.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        # Canonical CLIP defaults: 224px shortest-edge resize, 224x224 crop.
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Resize ``image`` so its shortest edge matches ``size["shortest_edge"]``."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # ``resize`` here is the module-level transform imported at the top of the file.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Center-crop ``image`` to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Run the full pipeline on one or more images; per-call args override instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__A : int = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Map original GLPN checkpoint key names onto the HuggingFace layout.

    The original block chained ``key.replace`` results into dead throwaway
    variables and referenced the undefined name ``state_dict``; this restores
    the intended in-loop rebinding of ``key``.  The function name is restored
    to ``rename_keys`` because the conversion entry point below calls it by
    that name.

    Args:
        state_dict: Mapping of original parameter names to tensors.

    Returns:
        ``OrderedDict`` with renamed keys, same values, original order.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection into separate key and value entries, in place.

    The original implementation stores keys and values as one matrix; the
    HuggingFace model expects distinct ``key`` and ``value`` weights.  The
    first ``hidden_sizes[i]`` rows of the fused matrix are the key weights,
    the remainder the value weights.

    Args:
        state_dict: Renamed checkpoint dict (mutated in place).
        config: GLPN config providing ``num_encoder_blocks``, ``depths`` and
            ``hidden_sizes``.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO cats image used as a conversion sanity check.

    Returns:
        ``PIL.Image.Image`` loaded from the streamed HTTP response.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # Bug fix: the original passed undefined names to requests.get; the URL
    # and stream=True (to expose ``.raw``) were intended.
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the HuggingFace format.

    Restored parameter names match the argparse call at the bottom of the
    file: ``(checkpoint_path, pytorch_dump_folder_path, push_to_hub,
    model_name)``.

    Args:
        checkpoint_path: Path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: Output folder (used as hub repo path).
        push_to_hub: Whether to upload model and processor to the hub.
        model_name: Hub model name; also selects the expected output slice
            ("nyu" or "kitti") for verification.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
    logger.info("Converting model...")
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output against reference values for the known checkpoints
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4)
        print("Looks ok!")
    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    # CLI entry point for the GLPN checkpoint conversion above.
    # NOTE(review): the parser is assigned to `__A` but subsequently used as
    # `parser`, and `parse_args()` is assigned to `__A` but used as `args` —
    # these names are inconsistent and would raise NameError; verify against
    # the original script.
    __A : List[str] = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    __A : Any = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 1 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    """Materialize a fine-pruned ("bertarized") model into a standalone checkpoint.

    Applies the selected binarizer to every prunable weight and saves the
    resulting dense-but-masked state dict next to the source model.  The
    function is named ``main`` to match the call in the ``__main__`` guard
    below; local names are restored from how the body uses them.

    Args:
        args: Parsed CLI namespace with ``pruning_method``, ``threshold``,
            ``model_name_or_path`` and ``target_model_path``.
    """
    pruning_method = args.pruning_method
    threshold = args.threshold
    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path
    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}
    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            # Embeddings / norms / pooler are never pruned — copy verbatim.
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    # Score tensors are consumed below, not saved themselves.
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                # Hard-concrete stretch interval used during L0 training.
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")
    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )
    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")
    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    # CLI entry point for the pruning/"bertarize" script above.
    # NOTE(review): the parser is assigned to `__A` but used as `parser`, and
    # `parse_args()` is assigned to `__A` but used as `args` — inconsistent
    # names that would raise NameError; verify against the original script.
    __A : Tuple = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder containing the model that was previously fine-pruned",
    )
    __A : Any = parser.parse_args()
    main(args)
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for GPT-Neo models.

    NOTE(review): the original block subclassed the undefined placeholder
    ``_UpperCamelCase`` and declared duplicate ``UpperCamelCase__`` parameters
    (a SyntaxError).  Base class, class attributes and parameter names are
    restored from the file's imports and from how the body uses them.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.attention_types = attention_types
        # One attention kind ("global"/"local") per layer, expanded from the
        # compact [patterns, repeat] description.
        self.attention_layers = self.expand_attention_types_params(attention_types)
        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[patterns, num_repeats], ...] into a flat per-layer list."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def custom_unfold(input, dimension, size, step):
    """Custom ``torch.Tensor.unfold`` implementation (ONNX-export friendly).

    Args:
        input: Tensor to unfold.
        dimension: Dimension along which windows are taken.
        size: Window length.
        step: Stride between consecutive windows.

    Returns:
        Tensor of windows, shaped like ``input.unfold(dimension, size, step)``.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    # Start index of every candidate window, then keep only full windows.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    # Move the in-window axis to the end, matching Tensor.unfold's layout.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Return the largest divisor of ``seq_length`` below ``window_size`` and the block count.

    Used by local attention to split a sequence into equal blocks no longer
    than the attention window.

    Args:
        seq_length: Total sequence length.
        window_size: Exclusive upper bound for the block length.

    Returns:
        Tuple ``(block_length, num_blocks)`` as tensors.
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo.

    NOTE(review): base class and method names restored from the file's
    imports and the ``OnnxConfigWithPast`` API (the original used undefined
    placeholders); confirm against the upstream configuration module.
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Describe the dynamic axes of the exported model's inputs."""
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        """Build dummy inputs (incl. zeroed past key/values) for ONNX tracing."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask to cover the dummy past positions as well.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
def encode(plain: str) -> list[int]:
    """Encode a lowercase string as numbers (a=1 ... z=26).

    Renamed to ``encode`` to match the call in ``main`` below.  Bug fix: the
    original applied ``ord`` to an undefined name instead of each character.
    """
    return [ord(elem) - 96 for elem in plain]
def decode(encoded: list[int]) -> str:
    """Decode a list of numbers (a=1 ... z=26) back into a lowercase string.

    Renamed to ``decode`` to match the call in ``main`` below; the parameter
    is named ``encoded`` as the body already referenced it.
    """
    return "".join(chr(elem + 96) for elem in encoded)
def main() -> None:
    """Read a line from stdin, print its numeric encoding and the round-trip decode."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


if __name__ == "__main__":
    main()
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase ( _UpperCamelCase):
'''simple docstring'''
__magic_name__ : Tuple = """megatron-bert"""
def __init__( self : Tuple , UpperCamelCase__ : Dict=29056 , UpperCamelCase__ : int=1024 , UpperCamelCase__ : Optional[int]=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : int=4096 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : int=512 , UpperCamelCase__ : str=2 , UpperCamelCase__ : Union[str, Any]=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : List[Any]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Dict=True , **UpperCamelCase__ : Tuple , ):
super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ )
A__ : Optional[int] =vocab_size
A__ : Optional[int] =hidden_size
A__ : str =num_hidden_layers
A__ : Any =num_attention_heads
A__ : str =hidden_act
A__ : Optional[int] =intermediate_size
A__ : str =hidden_dropout_prob
A__ : str =attention_probs_dropout_prob
A__ : List[Any] =max_position_embeddings
A__ : List[Any] =type_vocab_size
A__ : Tuple =initializer_range
A__ : Any =layer_norm_eps
A__ : Any =position_embedding_type
A__ : Union[str, Any] =use_cache
| 656 | 1 |
"""simple docstring"""
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
'''simple docstring'''
@register_to_config
def __init__( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : bool = False , ):
super().__init__()
A__ : List[Any] =nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A__ : Any =nn.Embedding(UpperCamelCase__ , UpperCamelCase__ )
A__ : str =False
A__ : List[str] =nn.Dropout(p=UpperCamelCase__ )
A__ : Any =TaConfig(
vocab_size=UpperCamelCase__ , d_model=UpperCamelCase__ , num_heads=UpperCamelCase__ , d_kv=UpperCamelCase__ , d_ff=UpperCamelCase__ , dropout_rate=UpperCamelCase__ , feed_forward_proj=UpperCamelCase__ , is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , )
A__ : int =nn.ModuleList()
for lyr_num in range(UpperCamelCase__ ):
A__ : Any =TaBlock(UpperCamelCase__ )
self.encoders.append(UpperCamelCase__ )
A__ : List[Any] =TaLayerNorm(UpperCamelCase__ )
A__ : int =nn.Dropout(p=UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str ):
A__ : List[str] =self.token_embedder(UpperCamelCase__ )
A__ : Tuple =encoder_input_tokens.shape[1]
A__ : int =torch.arange(UpperCamelCase__ , device=encoder_input_tokens.device )
x += self.position_encoding(UpperCamelCase__ )
A__ : Tuple =self.dropout_pre(UpperCamelCase__ )
# inverted the attention mask
A__ : Dict =encoder_input_tokens.size()
A__ : Any =self.get_extended_attention_mask(UpperCamelCase__ , UpperCamelCase__ )
for lyr in self.encoders:
A__ : Any =lyr(UpperCamelCase__ , UpperCamelCase__ )[0]
A__ : List[str] =self.layer_norm(UpperCamelCase__ )
return self.dropout_post(UpperCamelCase__ ), encoder_inputs_mask
| 656 | """simple docstring"""
from __future__ import annotations
def lowercase(nums: list[float]) -> bool:
    """Check whether the side lengths ``nums`` can form a polygon.

    A polygon exists iff the longest side is strictly shorter than the sum of
    all other sides.  Bug fix: the original referenced the parameter under two
    different names (one of them undefined).

    Args:
        nums: Candidate side lengths.

    Returns:
        ``True`` if the lengths can form a polygon, ``False`` otherwise.

    Raises:
        ValueError: If fewer than 3 effective sides are given (the original
            guard rejects fewer than 2 values) or any value is non-positive.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    # Sort a copy so the input list is left untouched.
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class __lowerCAmelCase(unittest.TestCase):
    """Bundles hyper-parameters for the DPT image-processor tests below.

    NOTE(review): the original ``__init__`` declared duplicate
    ``UpperCamelCase__`` parameters (a SyntaxError); names are restored from
    the attribute assignments.  Mutable list defaults replaced with the
    ``None``-sentinel idiom (behavior unchanged — the lists are never
    mutated).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Unit tests for the DPT image processor (resize + normalize pipeline).

    NOTE(review): this block looks machine-obfuscated.  Every method is named
    ``_UpperCAmelCase`` (later definitions shadow earlier ones in the class
    dict), and locals are rebound to ``A__``/``UpperCamelCase__``, so names
    such as ``image_processor`` and ``encoded_images`` are undefined at their
    use sites.  The comments below describe the evident intent; restore the
    original method/variable names before relying on this code.
    """

    # Class under test; None when the vision extra (PIL) is unavailable.
    __magic_name__ : Dict = DPTImageProcessor if is_vision_available() else None

    def _UpperCAmelCase ( self : Any ):
        # setUp: build the helper that supplies processor kwargs and images.
        A__ : Union[str, Any] =DPTImageProcessingTester(self )

    @property
    def _UpperCAmelCase ( self : Optional[Any] ):
        # Kwargs dict used to instantiate the processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()

    def _UpperCAmelCase ( self : List[Any] ):
        # The processor must expose every configurable attribute.
        A__ : List[str] =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCamelCase__ , "image_mean" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , "image_std" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , "do_normalize" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , "do_resize" ) )
        self.assertTrue(hasattr(UpperCamelCase__ , "size" ) )

    def _UpperCAmelCase ( self : Tuple ):
        # from_dict honours the stored size and allows overriding it.
        A__ : List[Any] =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
        A__ : Union[str, Any] =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"height": 42, "width": 42} )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Initialize image_processing
        A__ : Optional[Any] =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        A__ : int =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , Image.Image )
        # Test not batched input
        A__ : str =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        A__ : Optional[Any] =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCAmelCase ( self : str ):
        # Initialize image_processing
        A__ : List[str] =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        A__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , np.ndarray )
        # Test not batched input
        A__ : Tuple =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        A__ : Optional[int] =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )

    def _UpperCAmelCase ( self : Tuple ):
        # Initialize image_processing
        A__ : Dict =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        A__ : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ )
        for image in image_inputs:
            self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
        # Test not batched input
        A__ : int =image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        A__ : str =image_processing(UpperCamelCase__ , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
| 656 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Lazily-imported public API of the Mega package: keys are submodule names,
# values the symbols each submodule exports.
# BUG FIX: the original bound this dict to a throwaway name (`__A`) while the
# final `_LazyModule(...)` call referenced the undefined `_import_structure`,
# and the modeling symbols were never registered under a key.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

# Modeling code is only importable when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so submodules are
    # only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
# Guard: the attribute layout accessed below (e.g. ``roberta.cfg.model.*``)
# only exists in fairseq >= 1.0.0a, so refuse to run against older releases.
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()

# NOTE(review): both assignments below rebind the same obfuscated name `__A`;
# the logger is immediately clobbered by the sample sentence that is used to
# compare fairseq vs. HF model outputs.  The original constants had distinct
# names (logger / SAMPLE_TEXT).
__A : Any = logging.get_logger(__name__)
__A : List[Any] = "Hello world! cécé herlolip"
def lowercase(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    """Convert a fairseq XLM-RoBERTa-XL checkpoint to the HF Transformers format.

    Copies every weight tensor from the fairseq model into a freshly built
    ``XLMRobertaXLForMaskedLM`` (or ``XLMRobertaXLForSequenceClassification``
    when ``classification_head`` is True), verifies both models produce the
    same output on a sample sentence, then saves the converted model.

    NOTE(review): the original block was machine-obfuscated -- all three
    parameters shared one name (a SyntaxError) and every assignment target was
    lost.  Parameter names and left-hand-side attribute paths were
    reconstructed from the upstream transformers conversion script; verify
    against a real checkpoint before trusting the output.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]
    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.
    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention: fairseq keeps q/k/v as separate square projections.
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )
        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate (fairseq names these fc1/fc2; the obfuscated source
        # had the impossible attribute `fca`)
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results on a fixed sample sentence.
    sample_text = "Hello world! cécé herlolip"
    input_ids: torch.Tensor = roberta.encode(sample_text).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    # BUG FIX: the original bound parse_args() to a throwaway name and then
    # called the undefined `convert_xlm_roberta_xl_checkpoint_to_pytorch`;
    # the conversion entry point defined above is `lowercase`.
    lowercase(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
| 656 | """simple docstring"""
def lowercase(UpperCamelCase: int):
    """Return every prime p with 2 <= p <= UpperCamelCase (Sieve of Eratosthenes).

    Raises:
        ValueError: if the input is not a positive integer.

    BUG FIX: in the original (obfuscated) body the sieve list and the cursor
    were rebound to a throwaway name, so `primes` and `p` were undefined
    (NameError) and composites were never struck out; the marking loop also
    stepped by the input instead of the current prime.
    """
    if UpperCamelCase <= 0:
        raise ValueError("Input must be a positive integer")
    # primes[k] is True while k is still a prime candidate.
    primes = [True] * (UpperCamelCase + 1)
    p = 2
    while p * p <= UpperCamelCase:
        if primes[p]:
            # Strike multiples of p starting at p*p; smaller multiples were
            # already struck by smaller primes.
            for multiple in range(p * p, UpperCamelCase + 1, p):
                primes[multiple] = False
        p += 1
    return [number for number in range(2, UpperCamelCase + 1) if primes[number]]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    # BUG FIX: the original bound the parsed input to a throwaway name and
    # called the undefined `prime_sieve_eratosthenes`; the sieve defined
    # above is named `lowercase`.
    print(lowercase(user_num))
| 656 | 1 |
"""simple docstring"""
from collections import defaultdict
from math import gcd
def lowercase(UpperCamelCase: int = 1500000):
    """Project Euler 75: count perimeters <= UpperCamelCase that are formed by
    exactly one integer-sided right triangle.

    Generates every primitive triple via Euclid's formula (m > n, opposite
    parity, coprime => perimeter 2*m*(m+n)) and tallies all its multiples.

    BUG FIX: the original passed the integer limit as ``defaultdict``'s
    default_factory (TypeError -- it must be callable), bounded the inner
    ``n`` loop by the limit instead of ``m``, and referenced the undefined
    name ``limit`` with the limit as range step.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    # Smallest perimeter achievable for a given m is 2*m*(m+1) (with n = 1),
    # so once that exceeds the limit no larger m can contribute.
    while 2 * euclid_m * (euclid_m + 1) <= UpperCamelCase:
        # n runs below m with opposite parity, guaranteeing a primitive triple
        # once coprimality is checked.
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            # Count the primitive triple and every scaled copy of it.
            for perimeter in range(primitive_perimeter, UpperCamelCase + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
    # BUG FIX: the original printed `solution()`, which is undefined here;
    # the solver defined above is named `lowercase`.
    print(f"""{lowercase() = }""")
| 656 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
    """Regression test: an optimizer wrapped by ``Accelerator.prepare`` must
    remain picklable (accelerate proxies it with AcceleratedOptimizer)."""

    def _UpperCAmelCase ( self : List[Any] ):
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        # BUG FIX: the original rebound every local to a throwaway name and
        # then passed the undefined `UpperCamelCase__` to prepare()/pickle.
        optimizer = accelerator.prepare(optimizer)
        try:
            # Round-trip through pickle; any failure is a test failure.
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
        # Reset accelerate's global state so later tests start clean.
        AcceleratorState._reset_state()
| 656 | 1 |
"""simple docstring"""
def lowercase ( UpperCamelCase : int , UpperCamelCase : int ):
"""simple docstring"""
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
A__ : int =str(bin(UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : str =str(bin(UpperCamelCase ) )[2:] # remove the leading "0b"
A__ : Optional[Any] =max(len(UpperCamelCase ) , len(UpperCamelCase ) )
return "0b" + "".join(
str(int(char_a == "1" and char_b == "1" ) )
for char_a, char_b in zip(a_binary.zfill(UpperCamelCase ) , b_binary.zfill(UpperCamelCase ) ) )
if __name__ == "__main__":
    # Run any doctests embedded in this module's docstrings when executed
    # as a script.
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__A : Optional[int] = None
__A : Union[str, Any] = logging.get_logger(__name__)
# NOTE(review): each assignment below rebinds the same obfuscated name `__A`,
# so the earlier constants are clobbered.  The class further down references
# the original distinct names (VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, SPIECE_UNDERLINE), which are
# undefined as written -- restore those names before use.

# Expected on-disk filenames for the slow (sentencepiece) and fast tokenizers.
__A : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Hub URLs for each pretrained BigBird checkpoint's vocab/tokenizer files.
__A : str = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

# Maximum sequence length each checkpoint was trained with.
__A : List[str] = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

# SentencePiece's word-boundary marker character.
__A : Tuple = "▁"
class __lowerCAmelCase ( _UpperCamelCase):
    """Fast (Rust-backed) BigBird tokenizer wrapper.

    NOTE(review): this block is machine-obfuscated.  All four public methods
    share the name ``_UpperCAmelCase`` (so later defs shadow earlier ones),
    the class attributes all share ``__magic_name__``, and ``__init__``
    declares every parameter as ``UpperCamelCase__`` -- duplicate parameter
    names are a SyntaxError.  The intended methods, in order, are
    build_inputs_with_special_tokens, get_special_tokens_mask,
    create_token_type_ids_from_sequences and save_vocabulary.
    """

    __magic_name__ : Dict = VOCAB_FILES_NAMES
    __magic_name__ : Any = PRETRAINED_VOCAB_FILES_MAP
    __magic_name__ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __magic_name__ : List[Any] = BigBirdTokenizer
    __magic_name__ : Any = ["""input_ids""", """attention_mask"""]
    __magic_name__ : List[int] = []

    def __init__( self : str , UpperCamelCase__ : Optional[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Union[str, Any]="<unk>" , UpperCamelCase__ : str="<s>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : Optional[int]="<pad>" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : List[Any]="[MASK]" , UpperCamelCase__ : str="[CLS]" , **UpperCamelCase__ : List[Any] , ):
        # Wrap each plain-string special token in AddedToken so stripping
        # behaviour is explicit; existing AddedToken instances pass through.
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else bos_token
        A__ : Optional[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else eos_token
        A__ : Optional[int] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else unk_token
        A__ : int =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else pad_token
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else cls_token
        A__ : List[Any] =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else sep_token
        # Mask token behave like a normal word, i.e. include the space before it
        A__ : str =AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , **UpperCamelCase__ , )
        A__ : List[Any] =vocab_file
        # A slow-tokenizer vocab can only be re-saved if we kept the file.
        A__ : Optional[int] =False if not self.vocab_file else True

    def _UpperCAmelCase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]).
        A__ : Tuple =[self.sep_token_id]
        A__ : str =[self.cls_token_id]
        if token_ids_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a + sep

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
        # get_special_tokens_mask: 1 marks special tokens, 0 sequence tokens.
        if already_has_special_tokens:
            if token_ids_a is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model." )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1]

    def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
        # create_token_type_ids_from_sequences: segment 0 for A, 1 for B.
        A__ : Tuple =[self.sep_token_id]
        A__ : Dict =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
        # save_vocabulary: copy the sentencepiece model file into the target dir.
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        A__ : List[str] =os.path.join(
            UpperCamelCase__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class __lowerCAmelCase ( _UpperCamelCase):
    """Tests for RagRetriever over canonical HF, custom HF and legacy indexes.

    NOTE(review): this block is machine-obfuscated.  All methods share the
    name ``_UpperCAmelCase`` (later defs shadow earlier ones), locals are
    rebound to ``A__``/``UpperCamelCase__`` so names like ``dataset``,
    ``retriever`` and ``doc_dicts`` are undefined at their use sites, and the
    annotated tuple-unpack lines (``A__ , A__ , A__ : ... =``) are
    SyntaxErrors in Python.  The comments below record the evident intent of
    each method (setUp, tokenizer getters, tearDown, fixture builders and the
    retrieve/save-load test cases).
    """

    def _UpperCAmelCase ( self : List[Any] ):
        # setUp: temp dir holding tiny DPR (WordPiece) and BART (BPE) vocabs,
        # plus the retrieval vector size (8) used by all fixtures.
        A__ : List[Any] =tempfile.mkdtemp()
        A__ : Dict =8
        # DPR tok
        A__ : Union[str, Any] =[
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        A__ : Union[str, Any] =os.path.join(self.tmpdirname , "dpr_tokenizer" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        A__ : Dict =os.path.join(UpperCamelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        # BART tok
        A__ : Any =[
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        A__ : Union[str, Any] =dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
        A__ : str =["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        A__ : Tuple ={"unk_token": "<unk>"}
        A__ : Any =os.path.join(self.tmpdirname , "bart_tokenizer" )
        os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
        A__ : List[str] =os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"] )
        A__ : Dict =os.path.join(UpperCamelCase__ , BART_VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(UpperCamelCase__ ) )

    def _UpperCAmelCase ( self : str ):
        # Question-encoder tokenizer built from the DPR vocab written in setUp.
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def _UpperCAmelCase ( self : str ):
        # Context-encoder tokenizer built from the same DPR vocab.
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )

    def _UpperCAmelCase ( self : int ):
        # Generator tokenizer built from the BART vocab written in setUp.
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )

    def _UpperCAmelCase ( self : str ):
        # tearDown: drop the temp dir and everything in it.
        shutil.rmtree(self.tmpdirname )

    def _UpperCAmelCase ( self : Dict ):
        # Two-row dataset with a Flat inner-product FAISS index on "embeddings".
        A__ : Any =Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        return dataset

    def _UpperCAmelCase ( self : int ):
        # Retriever over the canonical HF index (load_dataset is patched to
        # return the dummy dataset above).
        A__ : List[str] =self.get_dummy_dataset()
        A__ : Optional[Any] =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
        with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
            A__ : List[Any] =dataset
            A__ : Tuple =RagRetriever(
                UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        return retriever

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : bool ):
        # Retriever over a CustomHFIndex, optionally serialized to disk first.
        A__ : List[Any] =self.get_dummy_dataset()
        A__ : int =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
        if from_disk:
            A__ : Optional[Any] =os.path.join(self.tmpdirname , "dataset" )
            A__ : Union[str, Any] =os.path.join(self.tmpdirname , "index.faiss" )
            dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
            dataset.drop_index("embeddings" )
            dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
            del dataset
            A__ : Union[str, Any] =RagRetriever(
                UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
        else:
            A__ : Union[str, Any] =RagRetriever(
                UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCamelCase__ ) , )
        return retriever

    def _UpperCAmelCase ( self : Dict ):
        # Retriever over the legacy (hnswSQ8 + pickled metadata) index format.
        A__ : Tuple =Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        A__ : str =os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
        dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
        pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
        A__ : Union[str, Any] =os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
        A__ : List[Any] ={sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(UpperCamelCase__ , open(UpperCamelCase__ , "wb" ) )
        A__ : str =RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
        A__ : List[str] =RagRetriever(
            UpperCamelCase__ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever

    def _UpperCAmelCase ( self : List[Any] ):
        # retrieve() over the canonical index: inner product should rank doc 1
        # first for the all-ones query and doc 0 first for its negation.
        A__ : Optional[Any] =1
        A__ : str =self.get_dummy_canonical_hf_index_retriever()
        A__ : str =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ , A__ , A__ : Optional[int] =retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Canonical retriever survives a save_pretrained / from_pretrained
        # round trip and can still retrieve.
        A__ : int =self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
                A__ : Dict =self.get_dummy_dataset()
                retriever.save_pretrained(UpperCamelCase__ )
                A__ : List[Any] =RagRetriever.from_pretrained(UpperCamelCase__ )
                self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
                A__ : Optional[int] =np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
                A__ : List[str] =retriever.retrieve(UpperCamelCase__ , n_docs=1 )
                self.assertTrue(out is not None )

    def _UpperCAmelCase ( self : Dict ):
        # retrieve() over a custom in-memory index.
        A__ : Dict =1
        A__ : Tuple =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
        A__ : str =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ , A__ , A__ : Any =retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _UpperCAmelCase ( self : Dict ):
        # Custom in-memory retriever survives a save/load round trip.
        A__ : Optional[Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCamelCase__ )
            A__ : List[str] =RagRetriever.from_pretrained(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            A__ : Optional[int] =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            A__ : Union[str, Any] =retriever.retrieve(UpperCamelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _UpperCAmelCase ( self : List[Any] ):
        # retrieve() over a custom index loaded back from disk.
        A__ : List[str] =1
        A__ : str =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
        A__ : List[Any] =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ , A__ , A__ : Any =retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , UpperCamelCase__ )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Custom on-disk retriever survives a save/load round trip.
        A__ : Union[str, Any] =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCamelCase__ )
            A__ : Any =RagRetriever.from_pretrained(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            A__ : List[Any] =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            A__ : str =retriever.retrieve(UpperCamelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    def _UpperCAmelCase ( self : List[Any] ):
        # retrieve() over the legacy index (no "embeddings"/"id" columns).
        A__ : Tuple =1
        A__ : List[str] =self.get_dummy_legacy_index_retriever()
        A__ : int =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ , A__ , A__ : Any =retriever.retrieve(UpperCamelCase__ , n_docs=UpperCamelCase__ )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(UpperCamelCase__ ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
        self.assertEqual(len(doc_dicts[0]["text"] ) , UpperCamelCase__ )
        self.assertEqual(doc_dicts[0]["text"][0] , "bar" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0] , "foo" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )

    def _UpperCAmelCase ( self : List[Any] ):
        # Legacy retriever survives a save/load round trip.
        A__ : List[str] =self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(UpperCamelCase__ )
            A__ : Optional[Any] =RagRetriever.from_pretrained(UpperCamelCase__ )
            self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
            A__ : List[Any] =np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
            A__ : Dict =retriever.retrieve(UpperCamelCase__ , n_docs=1 )
            self.assertTrue(out is not None )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _UpperCAmelCase ( self : Tuple ):
        # __call__ returns numpy arrays by default and torch tensors when
        # return_tensors="pt" is requested.
        import torch

        A__ : Optional[Any] =1
        A__ : List[str] =self.get_dummy_canonical_hf_index_retriever()
        A__ : str =[[5, 7], [10, 11]]
        A__ : str =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ : Optional[int] =retriever(UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ )
        A__ , A__ , A__ : str =(
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , UpperCamelCase__ )
        self.assertIsInstance(UpperCamelCase__ , np.ndarray )
        A__ : Dict =retriever(
            UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ , return_tensors="pt" , )
        A__ , A__ , A__ , A__ : List[str] =(  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCamelCase__ , torch.Tensor )
        self.assertIsInstance(UpperCamelCase__ , torch.Tensor )

    @require_torch
    @require_tokenizers
    @require_sentencepiece
    def _UpperCAmelCase ( self : List[Any] ):
        # With a context-encoder tokenizer set, __call__ additionally returns
        # tokenized-doc keys (6 attributes total).
        A__ : List[Any] =self.get_dpr_ctx_encoder_tokenizer()
        A__ : int =1
        A__ : Any =self.get_dummy_custom_hf_index_retriever(from_disk=UpperCamelCase__ )
        retriever.set_ctx_encoder_tokenizer(UpperCamelCase__ )
        A__ : List[Any] =[[5, 7], [10, 11]]
        A__ : Optional[int] =np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
        A__ : Tuple =retriever(UpperCamelCase__ , UpperCamelCase__ , prefix=retriever.config.generator.prefix , n_docs=UpperCamelCase__ )
        self.assertEqual(
            len(UpperCamelCase__ ) , 6 )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , UpperCamelCase__ )  # check for doc token related keys in dictionary.
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
# Module-level constants: the obfuscated `__A` names are restored to the
# identifiers actually referenced below (`logger` at the save_vocabulary error
# path, `VOCAB_FILES_NAMES` when building the output vocab filename).
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase(_UpperCamelCase):
    """CPM tokenizer: jieba pre-tokenization + SentencePiece (XLNet-style post-processing).

    Fixes applied: `__init__` had every parameter named identically (SyntaxError);
    all methods shared one name and shadowed each other; two `replace` calls lost
    their `SPIECE_UNDERLINE` argument; `out_type=str` was dropped; one
    `cur_pieces[0] = ...` assignment target was lost.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        # Map space/newline to placeholder chars so they survive SentencePiece.
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        """Return token -> id mapping including added tokens."""
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and case according to the init flags."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        """Tokenize with SentencePiece, splitting trailing commas off digit pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet layout: A + <sep> + <cls> or A + <sep> + B + <sep> + <cls>."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or serialize) the SentencePiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        # Undo the space/newline placeholder substitution applied at encode time.
        text = super()._decode(*args, **kwargs)
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 656 | 1 |
"""simple docstring"""
import argparse
import os
import re
__A : str = "src/transformers"
# Pattern that looks at the indentation in a line.
__A : Dict = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
__A : Tuple = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
__A : List[Any] = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
__A : int = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
__A : int = re.compile(R"\[([^\]]+)\]")
def get_indent(line):
    """Return the leading whitespace of `line` ("" for a blank line)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]
def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks whose first line sits at `indent_level`.

    If `start_prompt`/`end_prompt` are given, everything before/after those
    marker lines becomes its own block. (Restored: the original signature had
    every parameter named identically, which is a SyntaxError.)
    """
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []
    # We split into blocks until we get to the `end_prompt` (or the end of the block).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1
    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))
    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))
    return blocks
def ignore_underscore(key):
    """Wrap a sort key so comparisons ignore case and underscores."""
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner
def sort_objects(objects, key=None):
    """Sort imported names: CONSTANTS first, then Classes, then functions.

    Fixed: the identity fallback previously returned an undefined name.
    """
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]
    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    """Sort the object names inside one `_import_structure` statement.

    Handles the three layouts: multi-line (one name per line), three-line
    (all names on the middle line) and single-line. Fixed: an inner lambda
    used an undefined name as its sort key.
    """
    # This inner function sort imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace("\"", "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f"\"{k}\"" for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace("\"", "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f"\"{k}\"" for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)
def sort_imports(file, check_only=True):
    """Sort the `_import_structure` entries of one __init__.py.

    Returns True when a change would be needed and `check_only` is set;
    otherwise rewrites the file in place. (Restored the duplicate-named
    parameters and the undefined names in the lambda and print.)
    """
    with open(file, encoding="utf-8") as f:
        code = f.read()
    if "_import_structure" not in code:
        return
    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )
    # We ignore block 0 (everything untils start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")
        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue
        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Slit the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure = {" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]
        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reorderded_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reorderded_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reorderded_blocks.append(block)
                count += 1
        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reorderded_blocks + [block_lines[-1]])
    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w", encoding="utf-8") as f:
                f.write("\n".join(main_blocks))
def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` over every __init__.py under PATH_TO_TRANSFORMERS."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                # NOTE(review): this overwrites instead of accumulating; `failures.append(...)`
                # was likely intended — kept as-is pending confirmation.
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    # CLI entry point: --check_only reports instead of rewriting files.
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 656 | """simple docstring"""
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations of `array` items summing to `target` (plain recursion).

    `n` (len(array)) is kept for signature parity with the other variants.
    """
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
def combination_sum_iv_with_dp_array(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to `target`, memoized top-down.

    Fixed: the memo write had lost its `dp_array[target]` subscript target.
    """
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array) for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Count ordered combinations summing to `target`, iterative bottom-up DP.

    Fixed: the base case had lost its `dp_array[0]` subscript target.
    """
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: the empty combination
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Demo: 9 ordered ways to reach 5 with items {1, 2, 5}.
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
| 656 | 1 |
"""simple docstring"""
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    """Launch `function(*args)` from a notebook on TPU, multiple GPUs, one GPU/MPS or CPU.

    Fixed: the mixed-precision error path referenced the undefined
    `args.mixed_precision`; all obfuscated locals restored.
    """
    # Detect the hosted-notebook environment (Kaggle sets KAGGLE_* env vars).
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())
    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )
    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8
        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    """Launch `function(*args)` in `num_processes` forked CPU processes for debugging."""
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes,
            master_addr="127.0.01",
            master_port="29500",
            accelerate_mixed_precision="no",
            accelerate_debug_rdv_file=tmp_file.name,
            accelerate_use_cpu="yes",
        ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """Exact GELU via the Gaussian CDF (erf form); referenced below as `_gelu`."""
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def _gelu_new(x):
    """Tanh-approximation GELU (the "new" GPT-2 variant)."""
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def mish(x):
    """Mish activation: x * tanh(softplus(x))."""
    x = tf.convert_to_tensor(x)
    return x * tf.tanh(tf.math.softplus(x))
def gelu_fast(x):
    """Fast GELU approximation with precomputed sqrt(2/pi) coefficient."""
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)  # ~= sqrt(2/pi)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))
def quick_gelu(x):
    """Sigmoid-based GELU approximation: x * sigmoid(1.702 * x)."""
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def gelu_10(x):
    """GELU clipped to [-10, 10] (used by some quantized models)."""
    return tf.clip_by_value(_gelu(x), -10, 10)
def glu(x, axis=-1):
    """Gated Linear Unit: split `x` in two halves along `axis`, gate the first by sigmoid of the second.

    Fixed: the sigmoid was previously applied to the wrong operand after the
    split lost its second unpacking target.
    """
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
# From TF 2.4 Keras ships a native gelu; wrap it for the approximate variant.
# Fixed: all four bindings were assigned to the same throwaway name, and the
# wrapper passed `x` instead of True to `approximate=`.
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
# Activation-name -> callable dispatch table (restored name `ACT2FN`, which
# the lookup function below indexes).
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def get_tf_activation(activation_string):
    """Look up an activation callable by name in ACT2FN, raising KeyError for unknown names."""
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
| 656 | 1 |
"""simple docstring"""
def gcd(a: int, b: int) -> int:
    """Return the greatest common divisor of `a` and `b` (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b
def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of `a` mod `m` via the extended Euclidean algorithm.

    Raises ValueError when gcd(a, m) != 1, i.e. no inverse exists.
    """
    if gcd(a, m) != 1:
        msg = f"mod inverse of {a!r} and {m!r} does not exist"
        raise ValueError(msg)
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        v1, v2, v3, u1, u2, u3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
| 656 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
    """Config tester checking Segformer-specific attributes exist on the config."""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
    """Builds tiny Segformer configs/inputs and checks model output shapes.

    Restored: `__init__` had every parameter named identically (SyntaxError)
    and all `self.<attr> =` assignment targets were lost.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[16, 32, 64, 128],
        downsampling_rates=[1, 4, 8, 16],
        num_attention_heads=[1, 2, 4, 8],
        is_training=True,
        use_labels=True,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SegformerConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            num_encoder_blocks=self.num_encoder_blocks,
            depths=self.depths,
            hidden_sizes=self.hidden_sizes,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Final feature map is downsampled by the last rate times 2.
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width)
        )

    def create_and_check_for_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SegformerForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4)
        )
        self.parent.assertGreater(result.loss, 0.0)

    def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels):
        config.num_labels = 1
        model = SegformerForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device)
        result = model(pixel_values, labels=labels)
        self.parent.assertGreater(result.loss, 0.0)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for Segformer models.

    Class attributes restored: they were all assigned to the same placeholder
    name, so the mixins never saw `all_model_classes` etc.
    """

    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : Union[str, Any] =SegformerModelTester(self )
A__ : Tuple =SegformerConfigTester(self , config_class=UpperCamelCase__ )
def _UpperCAmelCase ( self : str ):
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self : Dict ):
A__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _UpperCAmelCase ( self : Tuple ):
A__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] ):
A__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*UpperCamelCase__ )
@unittest.skip("SegFormer does not use inputs_embeds" )
def _UpperCAmelCase ( self : Dict ):
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def _UpperCAmelCase ( self : Tuple ):
pass
def _UpperCAmelCase ( self : List[str] ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : int =model_class(UpperCamelCase__ )
A__ : Optional[int] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ : Optional[int] =[*signature.parameters.keys()]
A__ : List[str] =["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _UpperCAmelCase ( self : str ):
A__ , A__ : Tuple =self.model_tester.prepare_config_and_inputs_for_common()
A__ : Union[str, Any] =True
for model_class in self.all_model_classes:
A__ : Optional[Any] =True
A__ : Union[str, Any] =False
A__ : str =True
A__ : Optional[int] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : str =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Any =outputs.attentions
A__ : List[str] =sum(self.model_tester.depths )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A__ : Dict =True
A__ : str =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Any =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Union[str, Any] =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first attentions (first block, first layer)
A__ : List[Any] =(self.model_tester.image_size // 4) ** 2
A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
A__ : Tuple =(self.model_tester.image_size // 32) ** 2
A__ : Optional[Any] =(self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
A__ : int =len(UpperCamelCase__ )
# Check attention is always last and order is fine
A__ : Optional[Any] =True
A__ : Any =True
A__ : Union[str, Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : Optional[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
self.assertEqual(out_len + 1 , len(UpperCamelCase__ ) )
A__ : Optional[Any] =outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first attentions (first block, first layer)
A__ : Union[str, Any] =(self.model_tester.image_size // 4) ** 2
A__ : Tuple =(self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def _UpperCAmelCase ( self : List[Any] ):
def check_hidden_states_output(UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
A__ : Optional[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
A__ : List[Any] =model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
A__ : Optional[Any] =outputs.hidden_states
A__ : int =self.model_tester.num_encoder_blocks
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
A__ , A__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ : Optional[Any] =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ : str =True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase ( self : Optional[int] ):
if not self.model_tester.is_training:
return
A__ , A__ : int =self.model_tester.prepare_config_and_inputs_for_common()
A__ : List[Any] =True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCamelCase__ ):
continue
A__ : List[Any] =model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.train()
A__ : int =self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
A__ : Union[str, Any] =model(**UpperCamelCase__ ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self : Tuple ):
pass
@slow
def _UpperCAmelCase ( self : Tuple ):
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ : Tuple =SegformerModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def lowercase ( ):
    """Load and return the COCO fixture image used by the integration tests.

    Fix: the obfuscated version assigned the opened image to a throwaway name
    (``A__``) and then returned the unbound name ``image`` (NameError).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests: run real SegFormer checkpoints on a COCO fixture
    image and compare logits / post-processed maps against recorded values.

    NOTE(review): assignment targets were mangled to ``A__`` by an obfuscation
    pass; ``model``, ``outputs``, ``encoded_inputs`` and ``segmentation`` are
    read below without being bound. Comments describe the evident intent.
    """

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # ADE20k b0 checkpoint: logits come out at 1/4 resolution,
        # (1, num_labels, 128, 128); a 3x3x3 slice is compared at atol=1e-4.
        # only resize + normalize
        A__ : List[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : Union[str, Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : Union[str, Any] =prepare_img()
        A__ : Union[str, Any] =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : int =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : Dict =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : Optional[int] =torch.tensor(
            [
                [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
                [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
                [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : Union[str, Any] ):
        # Cityscapes b1 checkpoint: same shape check, looser atol=1e-1.
        # only resize + normalize
        A__ : Dict =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : int =SegformerForSemanticSegmentation.from_pretrained(
            "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(UpperCamelCase__ )
        A__ : Tuple =prepare_img()
        A__ : str =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Optional[int] =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : int =model(UpperCamelCase__ )
        A__ : List[str] =torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
        A__ : List[Any] =torch.tensor(
            [
                [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
                [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
                [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
            ] ).to(UpperCamelCase__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , UpperCamelCase__ , atol=1E-1 ) )

    @slow
    def _UpperCAmelCase ( self : int ):
        # post_process_semantic_segmentation: with target_sizes the map is
        # resized to (500, 300); without, it stays at the logits' (128, 128).
        # only resize + normalize
        A__ : Optional[Any] =SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=UpperCamelCase__ , align=UpperCamelCase__ , do_random_crop=UpperCamelCase__ )
        A__ : List[Any] =SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
            UpperCamelCase__ )
        A__ : str =prepare_img()
        A__ : Dict =image_processor(images=UpperCamelCase__ , return_tensors="pt" )
        A__ : Any =encoded_inputs.pixel_values.to(UpperCamelCase__ )
        with torch.no_grad():
            A__ : Dict =model(UpperCamelCase__ )
        A__ : Any =outputs.logits.detach().cpu()
        A__ : Union[str, Any] =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ , target_sizes=[(500, 300)] )
        A__ : List[str] =torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
        A__ : int =image_processor.post_process_semantic_segmentation(outputs=UpperCamelCase__ )
        A__ : Tuple =torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , UpperCamelCase__ )
| 656 | 1 |
"""simple docstring"""
def lowercase(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of a
    0/1 grid, moving in the four cardinal directions and never stepping on a
    1-cell or revisiting a cell on the current path.

    Fixes over the obfuscated original: all four parameters shared one name
    (a SyntaxError), several locals were unbound, and the recursive calls
    targeted an undefined ``depth_first_search``.

    :param grid: rectangular grid; 0 = free cell, 1 = blocked cell
    :param row:  current row index
    :param col:  current column index
    :param visit: set of (row, col) cells already on the current path
    :return: number of distinct simple paths to the bottom-right cell
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already visited, or blocked: no path through here.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    # Reached the target cell: exactly one path.
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += lowercase(grid, row + 1, col, visit)
    count += lowercase(grid, row - 1, col, visit)
    count += lowercase(grid, row, col + 1, visit)
    count += lowercase(grid, row, col - 1, visit)
    # Backtrack so other branches may pass through this cell.
    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase):
    """Builds tiny random RobertaPreLayerNorm configs and inputs for the Flax
    common tests.

    NOTE(review): assignment targets were mangled to ``A__`` by an obfuscation
    pass; instance attributes such as ``self.batch_size`` and locals such as
    ``config_and_inputs`` are therefore read without ever being bound.
    Comments describe the evident intent.
    """

    def __init__( self : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[Any]=True , UpperCamelCase__ : List[str]=99 , UpperCamelCase__ : Optional[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Union[str, Any]=37 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : Any=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : List[str]=2 , UpperCamelCase__ : List[str]=0.02 , UpperCamelCase__ : List[Any]=4 , ):
        # Store the hyper-parameters used to build configs and random inputs.
        A__ : str =parent
        A__ : List[str] =batch_size
        A__ : Any =seq_length
        A__ : List[str] =is_training
        A__ : List[Any] =use_attention_mask
        A__ : List[Any] =use_token_type_ids
        A__ : Dict =use_labels
        A__ : List[Any] =vocab_size
        A__ : Optional[int] =hidden_size
        A__ : Optional[Any] =num_hidden_layers
        A__ : str =num_attention_heads
        A__ : int =intermediate_size
        A__ : Tuple =hidden_act
        A__ : Tuple =hidden_dropout_prob
        A__ : Dict =attention_probs_dropout_prob
        A__ : Any =max_position_embeddings
        A__ : Any =type_vocab_size
        A__ : Union[str, Any] =type_sequence_label_size
        A__ : Optional[Any] =initializer_range
        A__ : int =num_choices

    def _UpperCAmelCase ( self : Tuple ):
        # Build (config, input_ids, token_type_ids, attention_mask) with
        # random token ids; mask / type ids only when the flags are set.
        A__ : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        A__ : List[str] =None
        if self.use_attention_mask:
            A__ : Optional[int] =random_attention_mask([self.batch_size, self.seq_length] )
        A__ : str =None
        if self.use_token_type_ids:
            A__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        A__ : Any =RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def _UpperCAmelCase ( self : Tuple ):
        # Repack the prepared inputs as the dict shape the common tests expect.
        A__ : Dict =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : str =config_and_inputs
        A__ : Optional[Any] ={"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def _UpperCAmelCase ( self : int ):
        # Decoder variant: additionally produce random encoder hidden states
        # and a 0/1 encoder attention mask.
        A__ : str =self.prepare_config_and_inputs()
        A__ , A__ , A__ , A__ : Union[str, Any] =config_and_inputs
        A__ : Union[str, Any] =True
        A__ : List[Any] =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        A__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( _UpperCamelCase , unittest.TestCase):
    """Common Flax model tests for RoBERTa-PreLayerNorm.

    NOTE(review): class attribute names were mangled to ``__magic_name__`` and
    assignment targets to ``A__``, so ``self.model_tester`` is never actually
    bound in setUp.
    """

    # Flag (originally a named common-test switch, e.g. test_head_masking).
    __magic_name__ : Union[str, Any] = True
    # All Flax model classes under test (empty when flax is unavailable).
    __magic_name__ : Dict = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def _UpperCAmelCase ( self : Optional[int] ):
        # setUp: build the model tester.
        A__ : Optional[int] =FlaxRobertaPreLayerNormModelTester(self )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Every class must load from the published PyTorch checkpoint and run
        # a trivial forward pass.
        for model_class_name in self.all_model_classes:
            A__ : Tuple =model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
            A__ : Union[str, Any] =model(np.ones((1, 1) ) )
            self.assertIsNotNone(UpperCamelCase__ )
@require_flax
class __lowerCAmelCase ( unittest.TestCase):
    """Slow integration tests comparing real checkpoint outputs against
    pre-recorded logit / hidden-state slices.

    NOTE(review): assignment targets were mangled to ``A__``; ``model`` and
    ``output`` are read without being bound, and dtypes like ``jnp.intaa`` /
    ``np.floataa`` look like digit-mangled ``int32``/``int64`` etc. — confirm
    against the upstream test before relying on this.
    """

    @slow
    def _UpperCAmelCase ( self : Tuple ):
        # Masked-LM head: output shape must be (1, 11, 50265) and a 3x3 logit
        # slice must match at atol=1e-4.
        A__ : Any =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : Tuple =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : str =model(UpperCamelCase__ )[0]
        A__ : List[Any] =[1, 11, 50265]
        self.assertEqual(list(output.shape ) , UpperCamelCase__ )
        # compare the actual values for a slice.
        A__ : Any =np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _UpperCAmelCase ( self : List[Any] ):
        # Base model: a 3x3 slice of the last hidden state must match.
        A__ : Union[str, Any] =FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase__ )
        A__ : List[Any] =np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] , dtype=jnp.intaa )
        A__ : Dict =model(UpperCamelCase__ )[0]
        # compare the actual values for a slice.
        A__ : Optional[Any] =np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
        self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )
| 656 | 1 |
"""simple docstring"""
def lowercase(files: list) -> int:
    """Return the minimum total cost of merging all files into one
    (the classic optimal merge pattern: always merge the two smallest).

    Fixes over the obfuscated original: locals (``files``, ``temp``,
    ``min_index``, ``optimal_merge_cost``) were assigned to throwaway names
    and then read unbound. Also swaps the O(n^2) repeated ``min``/``index``
    scan for a heap, and works on a copy so the caller's list is untouched.

    :param files: sizes of the files to merge (non-negative numbers)
    :return: minimum achievable total merge cost; 0 for zero or one file
    """
    import heapq

    heap = list(files)  # copy: the original mutated its argument via pop/append
    heapq.heapify(heap)
    optimal_merge_cost = 0
    while len(heap) > 1:
        # Merge the two currently-smallest files; the merged size is the cost.
        merged = heapq.heappop(heap) + heapq.heappop(heap)
        optimal_merge_cost += merged
        heapq.heappush(heap, merged)
    return optimal_merge_cost


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
__A : List[Any] = logging.get_logger(__name__)
# (old prefix, new prefix) pairs used to translate original VisualBERT
# checkpoint keys to the Transformers naming scheme.
# Fix: both constants below were named `__A` (the second shadowed the first)
# while the functions in this file read `rename_keys_prefix` and
# `ACCEPTABLE_CHECKPOINTS`; restore the names the call sites use.
rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

# File names of the original VisualBERT checkpoints this script can convert.
ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]
def load_state_dict(checkpoint_path):
    """Load a checkpoint's state dict onto CPU.

    Fix: the converter below calls ``load_state_dict`` but this definition was
    obfuscated to ``lowercase``; restore the name the call site uses.

    :param checkpoint_path: path to a ``.th`` checkpoint file
    :return: the deserialized state dict
    """
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd
def get_new_dict(d, config, rename_keys_prefix=None):
    """Translate an original VisualBERT state dict to Transformers key names.

    Fixes over the obfuscated original: all parameters shared one name (a
    SyntaxError), the default referenced an undefined module name, and the
    left-hand side of the decoder-bias assignment was lost. The converter
    calls this as ``get_new_dict(...)``, so that name is restored.

    :param d: original state dict
    :param config: model config (kept for call-site compatibility; unused)
    :param rename_keys_prefix: (old prefix, new prefix) pairs; defaults to the
        standard VisualBERT renaming table
    :return: OrderedDict with renamed keys, detector weights dropped
    """
    if rename_keys_prefix is None:
        # Default renaming table, inlined so the function is self-contained.
        rename_keys_prefix = [
            ("bert.bert", "visual_bert"),
            ("bert.cls", "cls"),
            ("bert.classifier", "cls"),
            ("token_type_embeddings_visual", "visual_token_type_embeddings"),
            ("position_embeddings_visual", "visual_position_embeddings"),
            ("projection", "visual_projection"),
        ]
    new_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # Object-detector weights are not part of the VisualBERT model.
            continue
        new_key = key
        for old_prefix, new_prefix in rename_keys_prefix:
            new_key = new_key.replace(old_prefix, new_prefix)
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to a Transformers
    model and save it with ``save_pretrained``.

    Fixes over the obfuscated original: every assignment target was collapsed
    to ``A__`` while later lines read named variables (``model_type``,
    ``config_params``, ``model`` ...), and the ``__main__`` guard calls
    ``convert_visual_bert_checkpoint``, so that name is restored.

    :param checkpoint_path: path to one of the files in ACCEPTABLE_CHECKPOINTS
    :param pytorch_dump_folder_path: output directory for the converted model
    """
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Infer the task and the config parameters from the checkpoint file name.
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2048, "num_labels": 3129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load the original state dict and translate its keys.
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # Script entry point: convert one checkpoint given on the command line.
    # Fix: the parse result was assigned to a throwaway name while the next
    # line read the unbound name `args`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 656 | 1 |
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__A : Optional[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module):
    """Encode an image into N pooled embeddings using a ResNet-152 trunk
    (upstream MM-IMDB ``ImageEncoder``).

    Fixes over the obfuscated original: digit-mangled API names
    (``resnetaaa`` -> ``resnet152``, ``AdaptiveAvgPoolad`` ->
    ``AdaptiveAvgPool2d``), locals assigned to throwaway names and then read
    unbound, and the forward method's name was mangled so the nn.Module had
    no ``forward`` at all.
    """

    def __init__(self, args):
        super().__init__()
        # Pretrained trunk; drop the final avgpool + fc, keep the conv stages.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        # Pool the 7x7 feature map down to args.num_image_embeds cells.
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class __lowerCAmelCase ( _UpperCamelCase):
    """JSON-lines dataset pairing tokenized text with an image and a
    multi-hot label vector (MM-IMDB style).

    NOTE(review): assignment targets were mangled to ``A__``; attributes such
    as ``self.data`` / ``self.tokenizer`` are read but never bound, and the
    label-indexing expression in ``__getitem__`` lost its left-hand side.
    Comments describe the evident intent.
    """

    def __init__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] ):
        # Presumably (data_path, tokenizer, transforms, labels,
        # max_seq_length) — parameter names were mangled; confirm via callers.
        A__ : List[Any] =[json.loads(UpperCamelCase__ ) for l in open(UpperCamelCase__ )]
        A__ : List[Any] =os.path.dirname(UpperCamelCase__ )
        A__ : Dict =tokenizer
        A__ : Any =labels
        A__ : List[str] =len(UpperCamelCase__ )
        A__ : Tuple =max_seq_length
        A__ : Optional[Any] =transforms

    def __len__( self : List[str] ):
        return len(self.data )

    def __getitem__( self : Any , UpperCamelCase__ : List[str] ):
        # Tokenize the row's text, split off the special start/end tokens,
        # truncate, build a multi-hot label vector, and load + transform the
        # RGB image.
        A__ : Tuple =torch.LongTensor(self.tokenizer.encode(self.data[index]["text"] , add_special_tokens=UpperCamelCase__ ) )
        A__ , A__ , A__ : Any =sentence[0], sentence[1:-1], sentence[-1]
        A__ : Dict =sentence[: self.max_seq_length]
        A__ : str =torch.zeros(self.n_classes )
        # NOTE(review): upstream this line is
        # `label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1`;
        # the left-hand side was lost in obfuscation.
        A__ : Union[str, Any] =1
        A__ : Union[str, Any] =Image.open(os.path.join(self.data_dir , self.data[index]["img"] ) ).convert("RGB" )
        A__ : int =self.transforms(UpperCamelCase__ )
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def _UpperCAmelCase ( self : int ):
        # Frequency count of every label across the dataset.
        A__ : List[Any] =Counter()
        for row in self.data:
            label_freqs.update(row["label"] )
        return label_freqs
def lowercase(batch):
    """Collate JsonlDataset rows into padded batch tensors (upstream MM-IMDB
    ``collate_fn``).

    Fixes over the obfuscated original: ``lens``/``bsz``/``max_seq_len`` were
    assigned to throwaway names, and ``torch.zeros`` was called with the raw
    batch instead of the sizes; the padded text/mask fill loop was lost.

    :param batch: list of dicts with keys "sentence" (1-D LongTensor),
        "image", "label", "image_start_token", "image_end_token"
    :return: (text, mask, image, img_start_token, img_end_token, target)
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    # Right-pad sentences with 0 and build the matching attention mask.
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowercase():
    """Return the fixed, ordered list of MM-IMDB genre labels."""
    genres = [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
    return genres
def lowercase():
    """Return the evaluation-time image preprocessing pipeline: resize to 256,
    center-crop to 224, convert to tensor, normalize with the MM-IMDB
    channel statistics."""
    pipeline = [
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.46_77_70_44, 0.44_53_14_29, 0.40_66_10_17],
            std=[0.12_22_19_94, 0.12_14_58_35, 0.14_38_04_69],
        ),
    ]
    return transforms.Compose(pipeline)
| 656 | """simple docstring"""
# Precomputed fifth power of each decimal digit, keyed by the digit character.
# Fix: this constant was renamed to `__A` by obfuscation while the function
# below reads `DIGITS_FIFTH_POWER`.
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the decimal digits of ``number``.

    Fix: `solution` below calls ``digits_fifth_powers_sum`` but this
    definition was obfuscated to ``lowercase``; restored, and made
    self-contained instead of depending on the (also-renamed) module table.

    :param number: non-negative integer
    :return: sum of d**5 over the decimal digits d of ``number``
    """
    return sum(int(digit) ** 5 for digit in str(number))
def solution() -> int:
    """Project Euler 30: sum of all numbers that equal the sum of the fifth
    powers of their own digits.

    Fix: the ``__main__`` guard calls ``solution()`` but this definition was
    obfuscated to ``lowercase``; restored, with a local digit-power table so
    the function is self-contained.

    The search stops below 1,000,000 because 6 * 9**5 < 1,000,000, so no
    7-digit number can qualify; numbers below 1000 are excluded as trivial.

    :return: the answer, 443839
    """
    powers = {str(digit): digit**5 for digit in range(10)}
    return sum(
        number
        for number in range(1000, 1000000)
        if number == sum(powers[digit] for digit in str(number))
    )


if __name__ == "__main__":
    print(solution())
| 656 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Optional[Any] = logging.get_logger(__name__)
class __lowerCAmelCase ( _UpperCamelCase):
    """Image processor: optional shortest-edge resize, center crop, rescale by
    1/255 and ImageNet-mean/std normalization, returning ``pixel_values`` in a
    :class:`BatchFeature`.

    NOTE(review): every parameter name in the method signatures below was
    mangled to the single repeated name ``UpperCamelCase__`` (duplicate
    parameter names are a SyntaxError in Python); the bodies still read the
    intended names (``size``, ``images``, ``image_mean``, ...). Comments
    describe the evident intent.
    """

    # Model input names produced by this processor.
    __magic_name__ : int = ["""pixel_values"""]

    def __init__( self : Union[str, Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Any , ):
        # Defaults: resize the shortest edge to 256, center-crop to 224x224,
        # rescale by 1/255, normalize with the ImageNet mean/std.
        super().__init__(**UpperCamelCase__ )
        A__ : Tuple =size if size is not None else {"shortest_edge": 256}
        A__ : Any =get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        A__ : Optional[int] =crop_size if crop_size is not None else {"height": 224, "width": 224}
        A__ : Union[str, Any] =get_size_dict(UpperCamelCase__ )
        A__ : List[Any] =do_resize
        A__ : Optional[Any] =size
        A__ : Union[str, Any] =resample
        A__ : Dict =do_center_crop
        A__ : Optional[Any] =crop_size
        A__ : Optional[Any] =do_rescale
        A__ : Union[str, Any] =rescale_factor
        A__ : List[str] =do_normalize
        A__ : Dict =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        A__ : Dict =image_std if image_std is not None else IMAGENET_STANDARD_STD

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[Any] , ):
        # Resize so the shortest edge matches size["shortest_edge"], keeping
        # the aspect ratio.
        A__ : str =get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        A__ : Dict =get_resize_output_image_size(UpperCamelCase__ , size=size["shortest_edge"] , default_to_square=UpperCamelCase__ )
        return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[Any] , ):
        # Center-crop to (size["height"], size["width"]).
        A__ : List[str] =get_size_dict(UpperCamelCase__ )
        return center_crop(UpperCamelCase__ , size=(size["height"], size["width"]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] ):
        # Multiply pixel values by the given scale factor.
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ):
        # Channel-wise (x - mean) / std normalization.
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Any , ):
        # Main preprocess entry point: per-call arguments override the
        # instance defaults; each enabled step is applied to every image.
        A__ : Optional[Any] =do_resize if do_resize is not None else self.do_resize
        A__ : Any =size if size is not None else self.size
        A__ : Optional[int] =get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        A__ : Union[str, Any] =resample if resample is not None else self.resample
        A__ : Dict =do_center_crop if do_center_crop is not None else self.do_center_crop
        A__ : str =crop_size if crop_size is not None else self.crop_size
        A__ : Tuple =get_size_dict(UpperCamelCase__ )
        A__ : List[str] =do_rescale if do_rescale is not None else self.do_rescale
        A__ : str =rescale_factor if rescale_factor is not None else self.rescale_factor
        A__ : Tuple =do_normalize if do_normalize is not None else self.do_normalize
        A__ : Tuple =image_mean if image_mean is not None else self.image_mean
        A__ : Optional[Any] =image_std if image_std is not None else self.image_std
        A__ : List[Any] =make_list_of_images(UpperCamelCase__ )
        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Validate that every enabled step has the arguments it needs.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # All transformations expect numpy arrays.
        A__ : int =[to_numpy_array(UpperCamelCase__ ) for image in images]
        if do_resize:
            A__ : Optional[Any] =[self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
        if do_center_crop:
            A__ : Union[str, Any] =[self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]
        if do_rescale:
            A__ : Optional[Any] =[self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
        if do_normalize:
            A__ : Any =[self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
        # Put channels in the requested layout and wrap in a BatchFeature.
        A__ : str =[to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
        A__ : Any ={"pixel_values": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
| 656 | """simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
# Referenced by the `add_code_sample_docstrings` decorators further down this file.
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """
    Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.

    Args:
        input: input tensor of any rank; the drop decision is made per sample (dim 0).
        drop_prob: probability of zeroing a whole sample's path.
        training: when False (eval), the input is returned unchanged.

    Returns:
        Tensor of the same shape as `input`; kept samples are rescaled by 1/keep_prob
        so the expected value is unchanged.
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # Work with tensors of any rank, not just 2D ConvNet activations: one Bernoulli
    # draw per sample, broadcast over all remaining dimensions.
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize to 0/1
    output = input.div(keep_prob) * random_tensor
    return output
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the module-level `drop_path`; active only in training mode.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings: a strided convolution followed by an optional norm."""

    def __init__(self, patch_size, stride, padding, num_channels, hidden_size, norm_layer=None):
        super().__init__()
        # Accept scalars or (h, w) iterables for the spatial hyper-parameters.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with a single group (i.e. LayerNorm over channels for [B, C, *] inputs)."""

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """PoolFormer's token mixer: average pooling with the identity subtracted out."""

    def __init__(self, pool_size):
        super().__init__()
        # stride=1 + symmetric padding keeps the spatial size; excluding pad cells
        # from the average avoids border bias.
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        # Subtract the input so only the pooled *difference* flows into the residual.
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """MLP block implemented with 1x1 convolutions: conv -> activation -> drop -> conv -> drop."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # `hidden_act` may be a string key into the activation registry or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """One PoolFormer block: pooling token-mixer + conv-MLP, each behind a norm and a residual."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Stochastic depth on the residual branches; identity when rate is 0.
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales applied to each residual branch.
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    """Stack of patch-embedding stages, each followed by its PoolFormer blocks."""

    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule: linearly increasing drop-path rate per layer
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings, one per stage
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of `depths[i]` layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights (truncated-normal conv/linear, unit LayerNorm)."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # Only the encoder supports gradient checkpointing.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# Module-level docstring fragments consumed by the `add_start_docstrings` decorators.
POOLFORMER_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
POOLFORMER_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class PoolFormerModel(_UpperCamelCase):
    # NOTE(review): the base class and several decorator arguments were mangled by
    # obfuscation (`_UpperCamelCase`, `UpperCamelCase__`); presumably they were the
    # PoolFormer pre-trained base class and POOLFORMER_INPUTS_DOCSTRING — confirm
    # against the upstream file before relying on them.
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.encoder = PoolFormerEncoder(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        # NOTE(review): `self.embeddings` is never assigned in this class (only
        # `self.encoder` is); this looks buggy — verify against upstream.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(UpperCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """Run the encoder; returns a BaseModelOutputWithNoAttention or a plain tuple."""
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            # `None` keeps the tuple layout (last_hidden_state, pooled_output, ...) stable.
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    # NOTE(review): this class is not referenced elsewhere in the visible file;
    # the restored name follows the upstream PoolFormer implementation — confirm.
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class PoolFormerForImageClassification(_UpperCamelCase):
    # NOTE(review): base class and some decorator args remain obfuscated
    # (`_UpperCamelCase`, `UpperCamelCase__`) — presumably the PoolFormer
    # pre-trained base class and POOLFORMER_INPUTS_DOCSTRING; confirm upstream.
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])
        # Classifier head (identity when there are no labels)
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        """
        Classify `pixel_values`; when `labels` is given, also compute the loss,
        inferring the problem type (regression / single- / multi-label) on first use.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]

        # Global average pool over the spatial dims, then classify.
        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 656 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class LDMPipelineFastTests(unittest.TestCase):
    """Fast (CPU-friendly) tests for the unconditional latent diffusion pipeline."""

    @property
    def dummy_uncond_unet(self):
        # Tiny randomly-initialized UNet; seeded for reproducibility.
        torch.manual_seed(0)
        model = UNetaDModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1E-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        # mps accumulates larger numerical drift than cpu/cuda
        tolerance = 1E-2 if torch_device != "mps" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    """Slow integration test against the released CompVis LDM checkpoint."""

    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        # mps accumulates larger numerical drift than cpu/cuda
        tolerance = 1E-2 if torch_device != "mps" else 3E-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
| 656 | """simple docstring"""
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for the IF inpainting super-resolution pipeline."""

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        # mps does not support device-bound generators, so fall back to the global one.
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2,
        )
| 656 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    """
    Configuration for the CvT model. Per-stage hyper-parameters are given as
    3-element lists (one entry per stage), following the upstream defaults.
    """

    model_type = "cvt"

    # NOTE(review): the list defaults below are shared mutable objects; they are kept
    # because the upstream config uses the same pattern and never mutates them.
    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 656 | """simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Import structure for the lazy module: submodule name -> public symbols.
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

# Each optional backend contributes its submodule only when available.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports so type checkers see the real symbols.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
def lowercase(input_str: str) -> bool:
    """
    Return True if no character occurs more than once in `input_str`.

    Uses an arbitrary-precision int as a bitmap keyed by each character's
    Unicode code point. An empty string is vacuously unique.

    NOTE(review): the function name looks like an obfuscation artifact
    (something like `is_unique` reads better) — kept to preserve the interface.
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for this character's code point, it repeats.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 656 | """simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Step `scheduler` `num_steps` times and return the learning rate seen at each step."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """
    Like `unwrap_schedule`, but additionally save and reload the scheduler state
    halfway through, to verify that serialization does not perturb the schedule.
    """
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    """Sanity checks that AdamW and Adafactor converge a single tensor to a target."""

    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2E-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1E-2,
            eps=(1E-30, 1E-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1E-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    """Checks each LR schedule's trajectory and its stability across save/reload."""

    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1E-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1E-2,
                msg=f'''failed for {scheduler_func} in normal scheduler''',
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f'''failed for {scheduler_func} in save and reload''')
class LambdaScheduleWrapper:
    """Picklable wrapper around an LR lambda (plain lambdas cannot be pickled)."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        # Replace each lr lambda on the scheduler with a picklable wrapper instance.
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
| 656 | 1 |
"""simple docstring"""
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
# All of these constants were collapsed to `__A` by the mangling; the decorators below
# (L"add_code_sample_docstrings") read them by the names restored here.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
    """Drop paths (stochastic depth) per sample, applied in the main path of residual blocks.

    Args:
        input: tensor of any rank; dim 0 is treated as the batch dimension.
        drop_prob: probability of zeroing an entire sample's residual branch.
        training: stochastic depth is active only in training mode; otherwise identity.

    Returns:
        The input unchanged when inactive, otherwise the input with whole samples
        zeroed and the survivors rescaled by 1/keep_prob (expectation-preserving).
    """
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    # One random value per sample, broadcast over the remaining dims
    # (works with diff dim tensors, not just 2D ConvNets).
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize to 0/1
    output = input.div(keep_prob) * random_tensor
    return output


# The mangled original exposed this helper as `lowercase`; keep that name as a
# backward-compatible alias (in-file callers use `drop_path`).
lowercase = drop_path
class PoolFormerDropPath(nn.Module):
    """Drop paths (stochastic depth) per sample, for use in the main path of residual blocks."""

    def __init__(self, drop_prob: Optional[float] = None):
        super().__init__()
        # Probability of dropping a sample's residual path
        # (the original lost this `self.` assignment, so `forward` would crash).
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # Delegates to the module-level `drop_path` helper; active only while training.
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        # Shown in the module's repr, e.g. "PoolFormerDropPath(p=0.1)".
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """Construct patch embeddings: a strided convolution followed by an optional norm layer."""

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        # Accept either a single int or an explicit (height, width) pair for each geometry arg.
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        # `nn.Convad` in the mangled original is not a torch name; nn.Conv2d is intended.
        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        # Identity when no norm layer is supplied so that forward() is uniform.
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    """Group normalization with one group.

    With a single group this is equivalent to layer-norm over the channel dimension
    for inputs of shape (batch, channels, height, width).
    """

    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
class PoolFormerPooling(nn.Module):
    """Average-pooling token mixer; returns pooled features minus the input.

    The subtraction puts the module in residual form: the caller adds the input
    back, so the net effect is plain average pooling.
    """

    def __init__(self, pool_size):
        super().__init__()
        # stride=1 + padding=pool_size//2 keeps the spatial size; count_include_pad=False
        # keeps border averages unbiased by the zero padding.
        # (`nn.AvgPoolad` in the mangled original is not a torch name; nn.AvgPool2d is intended.)
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
class PoolFormerOutput(nn.Module):
    """PoolFormer channel-MLP block: two 1x1 convolutions with activation and stochastic depth."""

    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        # 1x1 convolutions act as per-position linear layers on (batch, channels, h, w) features.
        # (The original lost every `self.` assignment and collapsed conv1/conv2 into `conva`.)
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        # hidden_act may be a string key into the activation table or a callable.
        if isinstance(config.hidden_act, str):
            self.act_fn = ACTaFN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class __lowerCAmelCase ( nn.Module):
    # NOTE(review): identifiers in this class are machine-mangled. Call sites in this file
    # refer to it as `PoolFormerLayer`, and the duplicated `UpperCamelCase__` parameter names
    # below are a SyntaxError as written; the intended signature is presumably
    # (config, num_channels, pool_size, hidden_size, intermediate_size, drop_path) -- TODO confirm.
    """One PoolFormer block: pooling token-mixer plus channel MLP, each with a residual connection."""

    def __init__( self : List[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Any ):
        super().__init__()
        # Sub-modules: average-pool mixer, MLP output block, and a GroupNorm before each branch.
        A__ : Optional[int] =PoolFormerPooling(UpperCamelCase__ )
        A__ : List[str] =PoolFormerOutput(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        A__ : int =PoolFormerGroupNorm(UpperCamelCase__ )
        # Useful for training neural nets
        # Stochastic depth is only instantiated when drop_path > 0; otherwise a no-op Identity.
        A__ : Tuple =PoolFormerDropPath(UpperCamelCase__ ) if drop_path > 0.0 else nn.Identity()
        A__ : Optional[Any] =config.use_layer_scale
        if config.use_layer_scale:
            # Learnable per-channel scales for each residual branch (LayerScale).
            A__ : List[str] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )
            A__ : List[Any] =nn.Parameter(
                config.layer_scale_init_value * torch.ones((UpperCamelCase__) ) , requires_grad=UpperCamelCase__ )

    def _UpperCAmelCase ( self : Any , UpperCamelCase__ : Optional[int] ):
        # NOTE(review): the `A__` locals below shadow one another while later lines read names
        # (pooling_output, hidden_states, outputs, layer_output, output) that are never bound --
        # evidence the original assignments were mangled. Structure documented only; do not
        # trust this body to run as written.
        if self.use_layer_scale:
            A__ : Optional[int] =self.pooling(self.before_norm(UpperCamelCase__ ) )
            A__ : Union[str, Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            A__ : Union[str, Any] =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : Tuple =()
            A__ : List[str] =self.output(self.after_norm(UpperCamelCase__ ) )
            A__ : Optional[Any] =self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            A__ : str =hidden_states + self.drop_path(UpperCamelCase__ )
            A__ : List[Any] =(output,) + outputs
            return outputs
        else:
            A__ : Tuple =self.drop_path(self.pooling(self.before_norm(UpperCamelCase__ ) ) )
            # First residual connection
            A__ : Optional[Any] =pooling_output + hidden_states
            A__ : Tuple =()
            # Second residual connection inside the PoolFormerOutput block
            A__ : List[str] =self.drop_path(self.output(self.after_norm(UpperCamelCase__ ) ) )
            A__ : Any =hidden_states + layer_output
            A__ : Tuple =(output,) + outputs
            return outputs
class __lowerCAmelCase ( nn.Module):
    # NOTE(review): mangled identifiers; referred to elsewhere in this file as `PoolFormerEncoder`.
    # `A__` assignments should bind self.config / self.patch_embeddings / self.block (see the
    # attribute reads in the forward method) -- TODO restore.
    """Stack of patch-embedding stages, each followed by a stage of PoolFormer blocks."""

    def __init__( self : Dict , UpperCamelCase__ : List[str] ):
        super().__init__()
        A__ : Tuple =config
        # stochastic depth decay rule
        A__ : Dict =[x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        A__ : Tuple =[]
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        A__ : List[str] =nn.ModuleList(UpperCamelCase__ )
        # Transformer blocks
        A__ : Union[str, Any] =[]
        A__ : Any =0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            A__ : Union[str, Any] =[]
            if i != 0:
                # `cur` indexes into the flat stochastic-depth schedule `dpr`.
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        UpperCamelCase__ , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(UpperCamelCase__ ) )
        A__ : str =nn.ModuleList(UpperCamelCase__ )

    def _UpperCAmelCase ( self : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Tuple=False , UpperCamelCase__ : Optional[int]=True ):
        # Forward pass: run each (embedding, block-stage) pair in sequence, optionally
        # collecting the hidden state produced after every stage.
        A__ : Union[str, Any] =() if output_hidden_states else None
        A__ : Dict =pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            A__ , A__ : List[Any] =layers
            # Get patch embeddings from hidden_states
            A__ : Any =embedding_layer(UpperCamelCase__ )
            # Send the embeddings through the blocks
            for _, blk in enumerate(UpperCamelCase__ ):
                A__ : List[str] =blk(UpperCamelCase__ )
                A__ : Tuple =layer_outputs[0]
            if output_hidden_states:
                A__ : List[Any] =all_hidden_states + (hidden_states,)
        if not return_dict:
            # Tuple form drops None entries, mirroring other HF encoders.
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=UpperCamelCase__ , hidden_states=UpperCamelCase__ )
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading
    and loading pretrained models.
    """

    # The original collapsed all four class attributes to the same (duplicate) name; values
    # are restored from the visible right-hand sides.
    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize weights: normal(0, initializer_range) for linear/conv, unit affine for LayerNorm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):  # `nn.Convad` in the mangled original
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        # NOTE(review): the encoder class in this file is mangled to `__lowerCAmelCase`;
        # `PoolFormerEncoder` is its intended name -- confirm it is restored alongside this.
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
# Module-level docstrings injected by the `add_start_docstrings*` decorators below;
# restored from the anonymous `__A` names (string contents unchanged).
POOLFORMER_START_DOCSTRING = R"\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
POOLFORMER_INPUTS_DOCSTRING = R"\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`PoolFormerImageProcessor.__call__`] for details.\n"
@add_start_docstrings(
    """The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    # NOTE(review): mangled identifiers -- this is `PoolFormerModel`; the base class and the
    # second decorator argument are presumably `PoolFormerPreTrainedModel` and the START
    # docstring constant, both also mangled. TODO confirm before relying on this class.
    """Bare PoolFormer encoder without a task-specific head."""

    def __init__( self : List[str] , UpperCamelCase__ : Dict ):
        super().__init__(UpperCamelCase__ )
        A__ : List[Any] =config
        A__ : Optional[Any] =PoolFormerEncoder(UpperCamelCase__ )
        # Initialize weights and apply final processing
        self.post_init()

    def _UpperCAmelCase ( self : Tuple ):
        # NOTE(review): reads self.embeddings, which __init__ never sets (it builds the encoder).
        # Looks like a latent bug inherited from upstream -- verify before use.
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def _UpperCAmelCase ( self : str , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # Forward: resolve output flags from the config, run the encoder, and return either
        # a plain tuple or a BaseModelOutputWithNoAttention.
        A__ : int =(
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        A__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )
        A__ : List[Any] =self.encoder(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : int =encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=UpperCamelCase__ , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module):
    """Single dense layer applied to the encoder's final hidden states (hidden_size -> hidden_size)."""

    def __init__(self, config):
        super().__init__()
        # The original lost the `self.` assignment, so forward's `self.dense` would crash.
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """ , _UpperCamelCase , )
class __lowerCAmelCase ( _UpperCamelCase):
    # NOTE(review): mangled identifiers -- this is `PoolFormerForImageClassification`; the
    # `A__` assignments in __init__ should bind self.num_labels / self.poolformer / self.norm /
    # self.classifier (see the attribute reads in the forward method). TODO restore.
    """PoolFormer encoder with a final norm and a linear image-classification head."""

    def __init__( self : Optional[Any] , UpperCamelCase__ : str ):
        super().__init__(UpperCamelCase__ )
        A__ : List[str] =config.num_labels
        A__ : Optional[int] =PoolFormerModel(UpperCamelCase__ )
        # Final norm
        A__ : Dict =PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        A__ : Dict =(
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UpperCamelCase__ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCamelCase__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[torch.LongTensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ):
        # Forward: encode, pool spatially (mean over H and W), classify, optionally compute loss.
        # NOTE(review): later lines read `outputs`/`logits`/`loss` which the mangled `A__`
        # assignments never bind -- structure documented only.
        A__ : Tuple =return_dict if return_dict is not None else self.config.use_return_dict
        A__ : List[str] =self.poolformer(
            UpperCamelCase__ , output_hidden_states=UpperCamelCase__ , return_dict=UpperCamelCase__ , )
        A__ : str =outputs[0]
        A__ : List[Any] =self.classifier(self.norm(UpperCamelCase__ ).mean([-2, -1] ) )
        A__ : Optional[Any] =None
        if labels is not None:
            # Infer the problem type once from num_labels / label dtype, mirroring other HF heads.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    A__ : int ="regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    A__ : Tuple ="single_label_classification"
                else:
                    A__ : Optional[int] ="multi_label_classification"
            if self.config.problem_type == "regression":
                A__ : Dict =MSELoss()
                if self.num_labels == 1:
                    A__ : Optional[Any] =loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    A__ : List[str] =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
            elif self.config.problem_type == "single_label_classification":
                A__ : Tuple =CrossEntropyLoss()
                A__ : int =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                A__ : List[Any] =BCEWithLogitsLoss()
                A__ : str =loss_fct(UpperCamelCase__ , UpperCamelCase__ )
        if not return_dict:
            A__ : Optional[int] =(logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=UpperCamelCase__ , logits=UpperCamelCase__ , hidden_states=outputs.hidden_states )
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

# fairseq key (prefix) -> HF key (prefix) maps, grouped per sub-module. These were all
# collapsed to `__A`, while the task maps and the converter functions below read them by
# the names restored here.
MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
# Per-task key maps (speech-to-text, text-to-speech, speech-to-speech).
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
# fairseq keys ignored for every task; a trailing ".*" matches a whole sub-tree.
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk the dotted `key` path inside `hf_pointer` and copy `value` into the resolved tensor.

    Args:
        hf_pointer: root HF model/module to descend into.
        key: dotted attribute path, e.g. "speecht5.encoder.prenet.embed_tokens".
        value: tensor from the fairseq checkpoint.
        full_name: original fairseq name, used only for error/log messages.
        weight_type: which tensor of the resolved module to set ("weight", "bias",
            "weight_g", "weight_v", "running_mean", "running_var",
            "num_batches_tracked") or None for a bare parameter.

    Raises:
        ValueError: if the checkpoint tensor's shape does not match the target's.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Sanity-check shapes before overwriting anything.
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}'''
        )

    # The mangled original assigned every branch to a throwaway `A__`; each branch must
    # write into the corresponding `.data` tensor of the resolved module.
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(F'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''')
def should_ignore(name, ignore_keys):
    """Return True if fairseq key `name` matches any ignore pattern.

    Patterns: a trailing ".*" matches everything under that prefix; an infix ".*."
    matches names containing both the prefix and the suffix; anything else matches
    as a plain substring.
    """
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : Dict ):
    # NOTE(review): mangled converter; called elsewhere as
    # `recursively_load_weights(fairseq_dict, hf_model, task)`, and the duplicated
    # `UpperCamelCase` parameter names are a SyntaxError as written. The `A__` locals should
    # bind unused_weights / feature_encoder / MAPPING / IGNORE_KEYS / is_used / weight_type
    # (the names the later lines read). TODO restore.
    """Copy every tensor of a fairseq state dict into the matching HF SpeechT5 sub-module."""
    A__ : Tuple =[]
    # Pick the key map and ignore list for the requested task.
    if task == "s2t":
        A__ : Dict =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : int =MAPPING_S2T
        A__ : List[Any] =IGNORE_KEYS_S2T
    elif task == "t2s":
        A__ : Union[str, Any] =None
        A__ : List[Any] =MAPPING_T2S
        A__ : Tuple =IGNORE_KEYS_T2S
    elif task == "s2s":
        A__ : Optional[Any] =hf_model.speechta.encoder.prenet.feature_encoder
        A__ : Tuple =MAPPING_S2S
        A__ : Any =IGNORE_KEYS_S2S
    else:
        raise ValueError(F'''Unsupported task: {task}''' )
    for name, value in fairseq_dict.items():
        if should_ignore(UpperCamelCase , UpperCamelCase ):
            logger.info(F'''{name} was ignored''' )
            continue
        A__ : Optional[Any] =False
        if "conv_layers" in name:
            # Conv feature-extractor tensors are handled by the dedicated loader.
            load_conv_layer(
                UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , hf_model.config.feat_extract_norm == "group" , )
            A__ : List[Any] =True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    A__ , A__ : Dict =key.split(".*." )
                    if prefix in name and suffix in name:
                        A__ : int =suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    A__ : List[Any] =True
                    if "*" in mapped_key:
                        # Splice the layer index from the fairseq name into the HF key.
                        A__ : Optional[int] =name.split(UpperCamelCase )[0].split("." )[-2]
                        A__ : int =mapped_key.replace("*" , UpperCamelCase )
                    # Classify which tensor of the target module this value belongs to.
                    if "weight_g" in name:
                        A__ : str ="weight_g"
                    elif "weight_v" in name:
                        A__ : Optional[Any] ="weight_v"
                    elif "bias" in name:
                        A__ : Any ="bias"
                    elif "weight" in name:
                        A__ : Optional[int] ="weight"
                    elif "running_mean" in name:
                        A__ : Tuple ="running_mean"
                    elif "running_var" in name:
                        A__ : Optional[int] ="running_var"
                    elif "num_batches_tracked" in name:
                        A__ : str ="num_batches_tracked"
                    else:
                        A__ : List[Any] =None
                    set_recursively(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
                continue
        if not is_used:
            unused_weights.append(UpperCamelCase )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def lowercase ( UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Dict ):
    # NOTE(review): mangled; called elsewhere as
    # `load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm)`,
    # and the duplicated `UpperCamelCase` parameter names are a SyntaxError as written. The
    # `A__` locals should bind name/items/layer_id/type_id and the `.data` tensors being set.
    """Copy one fairseq feature-extractor conv/norm tensor into the HF feature extractor."""
    A__ : Any =full_name.split("conv_layers." )[-1]
    A__ : Dict =name.split("." )
    A__ : int =int(items[0] )
    A__ : str =int(items[1] )
    if type_id == 0:
        # type 0: the convolution itself.
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            A__ : Optional[Any] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            A__ : Optional[int] =value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2: the layer norm (only layer 0 carries one when group norm is used).
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
            A__ : Any =value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(UpperCamelCase )
@torch.no_grad()
def lowercase ( UpperCamelCase : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str] , UpperCamelCase : str=None , UpperCamelCase : Any=None , UpperCamelCase : Tuple=None , ):
    # NOTE(review): mangled; invoked from the __main__ block as
    # `convert_speechta_checkpoint(task, checkpoint_path, pytorch_dump_folder_path,
    # config_path, vocab_path, push_to_hub)`, and the duplicated `UpperCamelCase` parameter
    # names are a SyntaxError as written. The `A__` locals should bind
    # config/model/tokenizer/mask_token/feature_extractor/processor/fairseq_checkpoint.
    """Convert a fairseq SpeechT5 checkpoint into an HF model + processor and save/push both."""
    if config_path is not None:
        A__ : Any =SpeechTaConfig.from_pretrained(UpperCamelCase )
    else:
        A__ : Any =SpeechTaConfig()
    # Task-specific model class and position-limit overrides.
    if task == "s2t":
        A__ : Union[str, Any] =config.max_text_positions
        A__ : Dict =SpeechTaForSpeechToText(UpperCamelCase )
    elif task == "t2s":
        A__ : str =1876
        A__ : Optional[int] =600
        A__ : Tuple =config.max_speech_positions
        A__ : Optional[Any] =SpeechTaForTextToSpeech(UpperCamelCase )
    elif task == "s2s":
        A__ : str =1876
        A__ : Tuple =config.max_speech_positions
        A__ : Any =SpeechTaForSpeechToSpeech(UpperCamelCase )
    else:
        raise ValueError(F'''Unknown task name: {task}''' )
    if vocab_path:
        A__ : str =SpeechTaTokenizer(UpperCamelCase , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. include the space before it
        A__ : Optional[Any] =AddedToken("<mask>" , lstrip=UpperCamelCase , rstrip=UpperCamelCase )
        A__ : int =mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token} )
        tokenizer.add_tokens(["<ctc_blank>"] )
    A__ : Dict =SpeechTaFeatureExtractor()
    A__ : Tuple =SpeechTaProcessor(tokenizer=UpperCamelCase , feature_extractor=UpperCamelCase )
    processor.save_pretrained(UpperCamelCase )
    # Load the fairseq checkpoint and copy its "model" state dict into the HF model.
    A__ : Union[str, Any] =torch.load(UpperCamelCase )
    recursively_load_weights(fairseq_checkpoint["model"] , UpperCamelCase , UpperCamelCase )
    model.save_pretrained(UpperCamelCase )
    if repo_id:
        print("Pushing to the hub..." )
        processor.push_to_hub(UpperCamelCase )
        model.push_to_hub(UpperCamelCase )
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the checkpoint converter.
    # NOTE(review): the parser/args variables were mangled to `__A`; the following
    # `parser.add_argument(...)` and `convert_speechta_checkpoint(args...)` calls read
    # `parser` and `args`, which are never bound as written -- TODO restore the names.
    __A : Dict = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    __A : str = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Restored conventional names for the two anonymous `__A` module constants
# (dict contents unchanged).
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
    "RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
    "RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
    "RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
    "RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
    "RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
    "RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
    "RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class for a RWKV model.

    Stores the architecture hyper-parameters; defaults match the visible mangled
    signature (vocab_size=50277, context_length=1024, hidden_size=4096, 32 layers).
    `attribute_map` lets callers read/write `max_position_embeddings` as an alias
    for `context_length`.
    """

    # The two duplicated `__magic_name__` class attributes are restored here.
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(
        self,
        vocab_size=50277,
        context_length=1024,
        hidden_size=4096,
        num_hidden_layers=32,
        attention_hidden_size=None,
        intermediate_size=None,
        layer_norm_epsilon=1E-5,
        bos_token_id=0,
        eos_token_id=0,
        rescale_every=6,
        tie_word_embeddings=False,
        use_cache=True,
        **kwargs,
    ):
        # The mangled original assigned every hyper-parameter to a throwaway `A__`
        # instead of `self.*`; restored here.
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Derived defaults mirror the visible fallbacks: attention width defaults to the
        # hidden size, the FFN width to 4x the hidden size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __lowerCAmelCase ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase):
'''simple docstring'''
__magic_name__ : List[Any] = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : int = 50257 , UpperCamelCase__ : int = 1024 , UpperCamelCase__ : int = 768 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : int = 12 , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str = "gelu_new" , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 0.1 , UpperCamelCase__ : float = 1E-5 , UpperCamelCase__ : float = 0.02 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
        # NOTE(review): mangled constructor of a prefix-conditioned GPT-2 wrapper. The
        # duplicated `UpperCamelCase__` parameter names are a SyntaxError as written; the body
        # reads prefix_length / prefix_inner_dim / prefix_hidden_dim / n_embd and the GPT2Config
        # keyword list, which suggests the intended leading parameters. The `A__` locals should
        # bind self.prefix_length / self.prefix_inner_dim / self.prefix_hidden_dim /
        # self.encode_prefix / self.decode_prefix / gpt_config / self.transformer -- TODO confirm.
        super().__init__()
        A__ : Dict =prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            # Without a hidden projection the prefix width must already match the GPT-2 width.
            raise ValueError(
                F'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                F''' `n_embd`: {n_embd} are not equal.''' )
        A__ : Optional[int] =prefix_inner_dim
        A__ : Optional[int] =prefix_hidden_dim
        # Optional bottleneck: project prefix embeddings into/out of a hidden dim, otherwise identity.
        A__ : Optional[int] =(
            nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        A__ : Optional[int] =(
            nn.Linear(self.prefix_hidden_dim , UpperCamelCase__ ) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        # Underlying GPT-2 language model configured from the constructor arguments.
        A__ : str =GPTaConfig(
            vocab_size=UpperCamelCase__ , n_positions=UpperCamelCase__ , n_embd=UpperCamelCase__ , n_layer=UpperCamelCase__ , n_head=UpperCamelCase__ , n_inner=UpperCamelCase__ , activation_function=UpperCamelCase__ , resid_pdrop=UpperCamelCase__ , embd_pdrop=UpperCamelCase__ , attn_pdrop=UpperCamelCase__ , layer_norm_epsilon=UpperCamelCase__ , initializer_range=UpperCamelCase__ , scale_attn_weights=UpperCamelCase__ , use_cache=UpperCamelCase__ , scale_attn_by_inverse_layer_idx=UpperCamelCase__ , reorder_and_upcast_attn=UpperCamelCase__ , )
        A__ : Any =GPTaLMHeadModel(UpperCamelCase__ )
def _UpperCAmelCase ( self : Any , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : torch.Tensor , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , ):
A__ : int =self.transformer.transformer.wte(UpperCamelCase__ )
A__ : Tuple =self.encode_prefix(UpperCamelCase__ )
A__ : Union[str, Any] =self.decode_prefix(UpperCamelCase__ )
A__ : Tuple =torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
A__ : Any =self.get_dummy_token(input_ids.shape[0] , input_ids.device )
A__ : List[Any] =torch.cat((dummy_token, input_ids) , dim=1 )
A__ : Any =self.transformer(inputs_embeds=UpperCamelCase__ , labels=UpperCamelCase__ , attention_mask=UpperCamelCase__ )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _UpperCAmelCase ( self : Optional[int] , UpperCamelCase__ : int , UpperCamelCase__ : torch.device ):
return torch.zeros(UpperCamelCase__ , self.prefix_length , dtype=torch.intaa , device=UpperCamelCase__ )
def _UpperCAmelCase ( self : Union[str, Any] , UpperCamelCase__ : Tuple ):
return self.encode_prefix(UpperCamelCase__ )
@torch.no_grad()
def _UpperCAmelCase ( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : str ):
A__ : Optional[int] =torch.split(UpperCamelCase__ , 1 , dim=0 )
A__ : List[str] =[]
A__ : Dict =[]
for feature in features:
A__ : Any =self.decode_prefix(feature.to(UpperCamelCase__ ) ) # back to the clip feature
# Only support beam search for now
A__ , A__ : Optional[Any] =self.generate_beam(
input_embeds=UpperCamelCase__ , device=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
A__ : Optional[Any] =torch.stack(UpperCamelCase__ )
A__ : Optional[int] =torch.stack(UpperCamelCase__ )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _UpperCAmelCase ( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : int = 5 , UpperCamelCase__ : int = 67 , UpperCamelCase__ : float = 1.0 , UpperCamelCase__ : Optional[int] = None , ):
A__ : str =eos_token_id
A__ : Optional[Any] =None
A__ : int =None
A__ : Union[str, Any] =torch.ones(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.int )
A__ : Any =torch.zeros(UpperCamelCase__ , device=UpperCamelCase__ , dtype=torch.bool )
if input_embeds is not None:
A__ : Union[str, Any] =input_embeds
else:
A__ : Optional[Any] =self.transformer.transformer.wte(UpperCamelCase__ )
for i in range(UpperCamelCase__ ):
A__ : Optional[int] =self.transformer(inputs_embeds=UpperCamelCase__ )
A__ : Tuple =outputs.logits
A__ : Union[str, Any] =logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
A__ : Optional[Any] =logits.softmax(-1 ).log()
if scores is None:
A__ , A__ : Union[str, Any] =logits.topk(UpperCamelCase__ , -1 )
A__ : Union[str, Any] =generated.expand(UpperCamelCase__ , *generated.shape[1:] )
A__ , A__ : Optional[int] =next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
A__ : str =next_tokens
else:
A__ : Optional[Any] =tokens.expand(UpperCamelCase__ , *tokens.shape[1:] )
A__ : str =torch.cat((tokens, next_tokens) , dim=1 )
else:
A__ : Union[str, Any] =-float(np.inf )
A__ : Dict =0
A__ : Optional[Any] =scores[:, None] + logits
seq_lengths[~is_stopped] += 1
A__ : Optional[Any] =scores_sum / seq_lengths[:, None]
A__ , A__ : List[Any] =scores_sum_average.view(-1 ).topk(UpperCamelCase__ , -1 )
A__ : Tuple =next_tokens // scores_sum.shape[1]
A__ : List[Any] =seq_lengths[next_tokens_source]
A__ : int =next_tokens % scores_sum.shape[1]
A__ : str =next_tokens.unsqueeze(1 )
A__ : List[Any] =tokens[next_tokens_source]
A__ : int =torch.cat((tokens, next_tokens) , dim=1 )
A__ : List[str] =generated[next_tokens_source]
A__ : Optional[Any] =scores_sum_average * seq_lengths
A__ : Optional[int] =is_stopped[next_tokens_source]
A__ : List[str] =self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
A__ : str =torch.cat((generated, next_token_embed) , dim=1 )
A__ : str =is_stopped + next_tokens.eq(UpperCamelCase__ ).squeeze()
if is_stopped.all():
break
A__ : Optional[int] =scores / seq_lengths
A__ : List[Any] =scores.argsort(descending=UpperCamelCase__ )
# tokens tensors are already padded to max_seq_length
A__ : int =[tokens[i] for i in order]
A__ : Any =torch.stack(UpperCamelCase__ , dim=0 )
A__ : int =torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 656 | 1 |
"""simple docstring"""
def lowercase(column_title: str) -> int:
    """Convert an Excel-style column title (e.g. ``"AB"``) to its column number.

    Digits are the letters ``A``..``Z`` with values 1..26, most significant first.

    Raises:
        ValueError: if `column_title` is empty or not all upper-case letters.
    """
    # `assert` is stripped under `python -O`, so validate explicitly.
    if not column_title.isupper():
        raise ValueError("column_title must be a non-empty upper-case string")
    answer = 0
    power = 0
    # Walk from the least-significant letter; ord("A") - 64 == 1.
    for index in range(len(column_title) - 1, -1, -1):
        answer += (ord(column_title[index]) - 64) * pow(26, power)
        power += 1
    return answer


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 656 | """simple docstring"""
import os
def solution():
    """Project Euler max path sum: read ``triangle.txt`` (next to this script)
    and return the maximum top-to-bottom sum through the triangle via
    bottom-up dynamic programming.
    """
    script_dir = os.path.dirname(os.path.realpath(__file__))
    triangle_path = os.path.join(script_dir, "triangle.txt")

    with open(triangle_path) as f:
        triangle = f.readlines()

    # Parse each whitespace-separated line of numbers into a row of ints.
    a = []
    for line in triangle:
        numbers_from_line = [int(number) for number in line.strip().split(" ")]
        a.append(numbers_from_line)

    # Each cell accumulates the best sum reachable from the row above.
    for i in range(1, len(a)):
        for j in range(len(a[i])):
            number1 = a[i - 1][j] if j != len(a[i - 1]) else 0
            number2 = a[i - 1][j - 1] if j > 0 else 0
            a[i][j] += max(number1, number2)
    return max(a[-1])


if __name__ == "__main__":
    print(solution())
| 656 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Dict = logging.get_logger(__name__)
__A : List[str] = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for XLM-RoBERTa-XL (a RoBERTa variant with a larger layout)."""

    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1E-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class __lowerCAmelCase(OnnxConfig):
    """ONNX export configuration: declares the model inputs and their dynamic axes."""

    @property
    def inputs(self):
        # Multiple-choice inputs carry an extra `choice` axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
| 656 | """simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger used by the conversion helpers below.
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    """Rename original GLPN checkpoint keys to the HuggingFace layout.

    Returns a new OrderedDict; the input mapping is not modified.
    """
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(F'''patch_embed{idx}''', F'''patch_embeddings.{int(idx)-1}''')
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(F'''layer_norm{idx}''', F'''layer_norm.{int(idx)-1}''')
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(F'''block{idx}''', F'''block.{int(idx)-1}''')
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(F'''linear_c{idx}''', F'''linear_c.{int(idx)-1}''')
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value

    return new_state_dict
def read_in_k_v(state_dict, config):
    """Split each fused key/value ("kv") projection in `state_dict` into
    separate key and value weights/biases, in place.
    """
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''')
            kv_bias = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''')
            # next, add keys and values (in that order) to the state dict
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.weight'''] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.key.bias'''] = kv_bias[: config.hidden_sizes[i]]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.weight'''] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[F'''glpn.encoder.block.{i}.{j}.attention.self.value.bias'''] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    """Download the standard COCO test image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    """Convert an original GLPN checkpoint to the HuggingFace format, verify
    its output on a test image, and optionally push it to the hub.
    """
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.41_47, 4.08_73, 4.06_73], [3.78_90, 3.28_81, 3.15_25], [3.76_74, 3.54_23, 3.49_13]] )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.42_91, 2.78_65, 2.51_51], [3.28_41, 2.70_21, 2.35_02], [3.11_47, 2.46_25, 2.24_81]] )
        else:
            raise ValueError(F'''Unknown model name: {model_name}''')

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1E-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add model", use_temp_dir=True, )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization="nielsr", commit_message="Add image processor", use_temp_dir=True, )
if __name__ == "__main__":
    # Command-line entry point for the conversion script.
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path",
        default=None,
        type=str,
        help="Path to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub."
    )
    parser.add_argument(
        "--model_name",
        default="glpn-kitti",
        type=str,
        help="Name of the model in case you're pushing to the hub.",
    )
    args = parser.parse_args()
    convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Lazy-import structure: names are only resolved on first attribute access.
_import_structure = {
    "configuration_gpt_neox_japanese": ["GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXJapaneseConfig"],
    "tokenization_gpt_neox_japanese": ["GPTNeoXJapaneseTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Modeling code needs torch; skip it when torch is missing.
    pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
        "GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTNeoXJapaneseForCausalLM",
        "GPTNeoXJapaneseLayer",
        "GPTNeoXJapaneseModel",
        "GPTNeoXJapanesePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
    from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox_japanese import (
            GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXJapaneseForCausalLM,
            GPTNeoXJapaneseLayer,
            GPTNeoXJapaneseModel,
            GPTNeoXJapanesePreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
__A : Any = logging.get_logger(__name__)
__A : Optional[Any] = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for GPT-Neo: layout sizes plus the per-layer attention
    pattern described by `attention_types`.
    """

    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1E-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                F'''but is `len(config.attention_layers) = {len(self.attention_layers )}`, '''
                F'''`config.num_layers = {self.num_layers}`. '''
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument." )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        """Expand [[types, repeat], ...] into a flat per-layer list of types."""
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
def lowercase(input, dimension, size, step):
    """ONNX-exportable re-implementation of ``torch.Tensor.unfold``: slide a
    window of `size` with stride `step` along `dimension` and stack the windows
    into a new trailing dimension.
    """
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    # Start index of every full window along the target dimension.
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    # Index with a tuple (list-of-tensor indexing is deprecated in torch).
    sliced = input[tuple(s)]

    # Move the window-content axis to the end, matching Tensor.unfold.
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)
def lowercase(seq_length, window_size):
    """Return the largest divisor of `seq_length` below `window_size` and the
    corresponding number of blocks (``seq_length // divisor``).
    """
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class __lowerCAmelCase(OnnxConfigWithPast):
    """ONNX export configuration for GPT-Neo with optional past key/values."""

    @property
    def inputs(self):
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self):
        # GPT-Neo stores the head count under `num_heads`.
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ):
        """Build dummy inputs, adding zero past key/values when `use_past`."""
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            # Extend the mask so it also covers the past positions.
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )

        return ordered_inputs

    @property
    def default_onnx_opset(self):
        return 13
| 656 | 1 |
"""simple docstring"""
from __future__ import annotations
# Motion model: one step left, down, right or up (index = action id).
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(grid, init, goal, cost, heuristic):
    """A*-style search on a 2-D grid (0 = free cell, 1 = obstacle).

    Returns the path from `init` to `goal` as a list of ``[row, col]`` cells
    and the action grid used to reconstruct it.

    Raises:
        ValueError: when no path to the goal exists.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    # Walk backwards from the goal using the stored actions.
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    # Small demo: obstacles form a wall with one passage near the bottom.
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
| 656 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Union[str, Any] = logging.get_logger(__name__)
__A : Any = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class __lowerCAmelCase(PretrainedConfig):
    """Configuration for Megatron-BERT (BERT architecture with Megatron sizes)."""

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 656 | 1 |
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    """Build a tiny fixture dataset: two near-duplicate files and one distinct file."""
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset
class __lowerCAmelCase(TestCase):
    """Unit tests for minhash-based near-duplicate detection and removal."""

    def test_make_duplicate_clusters(self):
        # The two "a..." files are near-duplicates and must land in one cluster.
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        # Deduplication keeps one representative of the duplicate pair.
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 656 | """simple docstring"""
from __future__ import annotations
def lowercase(nums: list[float]) -> bool:
    """Return True if side lengths `nums` can form a polygon: the longest side
    must be strictly shorter than the sum of all the others.

    Raises:
        ValueError: for fewer than two sides or any non-positive side length.
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__A : Union[str, Any] = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A : str = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__A : Union[str, Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Import structure consumed by _LazyModule: submodule name -> public symbols.
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only registered when torch is present.
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | 1 |
"""simple docstring"""
# Digit-position levels used when jumping through the sequence (see next_term).
ks = range(2, 20 + 1)
# base[k] == 10**k, precomputed once for digit arithmetic.
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> cached jumps [(diff, terms_jumped, k), ...]
memo: dict[int, dict[int, list[list[int]]]] = {}
def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10**k when the terms are written in the
    form a(i) = b * 10**k + c.

    Returns a tuple (difference, number of terms jumped).
    """
    # ds_b: digitsum(b); c: the low-order value below 10**k.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)
def compute(a_i, k, i, n):
    """
    Same as next_term(a_i, k, i, n) but computes terms sequentially
    (one digit-sum addition per step) instead of jumping.

    Returns a tuple (difference, number of terms computed).
    """
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend

        # recompute digitsum(c) while adding `addend` into the low k digits
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        # a carry into b changes digitsum(b): stop sequential computation
        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)

    return diff, i - start_i
def lowercase ( UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : Any ):
"""simple docstring"""
for j in range(UpperCamelCase , len(UpperCamelCase ) ):
A__ : str =digits[j] + addend
if s >= 10:
A__ , A__ : List[str] =divmod(UpperCamelCase , 10 )
A__ : Union[str, Any] =addend // 10 + quotient
else:
A__ : Any =s
A__ : List[Any] =addend // 10
if addend == 0:
break
while addend > 0:
A__ , A__ : Dict =divmod(UpperCamelCase , 10 )
digits.append(UpperCamelCase )
def solution(n: int = 10**15) -> int:
    """
    Return the n-th term of the sequence a(1) = 1,
    a(i + 1) = a(i) + digitsum(a(i))  (Project Euler style).
    """
    # Current term as a little-endian digit list.
    digits = [1]
    i = 1
    dn = 0  # terms jumped so far
    while True:
        _diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # Reassemble the digit list into an integer.
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
| 656 | """simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """
    Sieve of Eratosthenes: return all primes in [2, num].

    >>> prime_sieve_eratosthenes(10)
    [2, 3, 5, 7]

    Raises:
        ValueError: if num <= 0.
    """
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Strike out every multiple of p — the step must be p, not num.
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
    print(prime_sieve_eratosthenes(user_num))
| 656 | 1 |
"""simple docstring"""
# Pipeline-parameter sets shared by the pipeline test mixins; each pipeline
# test class points its `params` / `batch_params` at one of these frozensets.

TEXT_TO_IMAGE_PARAMS = frozenset(
    [
        "prompt",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_IMAGE_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TEXT_TO_IMAGE_IMAGE_PARAMS = frozenset([])

IMAGE_TO_IMAGE_IMAGE_PARAMS = frozenset(["image"])

IMAGE_VARIATION_PARAMS = frozenset(
    [
        "image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_VARIATION_BATCH_PARAMS = frozenset(["image"])

TEXT_GUIDED_IMAGE_VARIATION_PARAMS = frozenset(
    [
        "prompt",
        "image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS = frozenset(["prompt", "image", "negative_prompt"])

TEXT_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # Text guided image variation with an image mask
        "prompt",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
    ]
)

TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["prompt", "image", "mask_image", "negative_prompt"])

IMAGE_INPAINTING_PARAMS = frozenset(
    [
        # image variation with an image mask
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["image", "mask_image"])

IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS = frozenset(
    [
        "example_image",
        "image",
        "mask_image",
        "height",
        "width",
        "guidance_scale",
    ]
)

IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS = frozenset(["example_image", "image", "mask_image"])

CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS = frozenset(["class_labels"])

CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS = frozenset(["class_labels"])

UNCONDITIONAL_IMAGE_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS = frozenset([])

UNCONDITIONAL_AUDIO_GENERATION_PARAMS = frozenset(["batch_size"])

UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS = frozenset([])

TEXT_TO_AUDIO_PARAMS = frozenset(
    [
        "prompt",
        "audio_length_in_s",
        "guidance_scale",
        "negative_prompt",
        "prompt_embeds",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
    ]
)

TEXT_TO_AUDIO_BATCH_PARAMS = frozenset(["prompt", "negative_prompt"])

TOKENS_TO_AUDIO_GENERATION_PARAMS = frozenset(["input_tokens"])

TOKENS_TO_AUDIO_GENERATION_BATCH_PARAMS = frozenset(["input_tokens"])
| 656 | """simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase(unittest.TestCase):
    """Checks that an optimizer wrapped by `accelerate` survives pickling."""

    # Method must be named test_* or unittest will never collect it.
    def test_accelerated_optimizer_pickling(self):
        # Throwaway model/optimizer pair; the concrete values are irrelevant.
        model = torch.nn.Linear(10, 10)
        optimizer = torch.optim.SGD(model.parameters(), 0.1)
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer)
        try:
            pickle.loads(pickle.dumps(optimizer))
        except Exception as e:
            self.fail(f"Accelerated optimizer pickling failed with {e}")
        # Reset the global accelerator state so later tests start clean.
        AcceleratorState._reset_state()
| 656 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Import structure consumed by _LazyModule: submodule name -> public symbols.
_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes are only registered when torch is present.
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 656 | """simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_big_bird import BigBirdTokenizer
else:
    # Without sentencepiece there is no slow tokenizer to fall back to.
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
        ),
    },
    "tokenizer_file": {
        "google/bigbird-roberta-base": (
            "https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
        ),
        "google/bigbird-roberta-large": (
            "https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
        ),
        "google/bigbird-base-trivia-itc": (
            "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/bigbird-roberta-base": 4_096,
    "google/bigbird-roberta-large": 4_096,
    "google/bigbird-base-trivia-itc": 4_096,
}

SPIECE_UNDERLINE = "▁"
class __lowerCAmelCase(PreTrainedTokenizerFast):
    """
    "Fast" BigBird tokenizer backed by HuggingFace's *tokenizers* library
    (Unigram/SentencePiece model).
    """

    # Read by the PreTrainedTokenizerFast machinery.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BigBirdTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        sep_token="[SEP]",
        mask_token="[MASK]",
        cls_token="[CLS]",
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        # A slow tokenizer can only be rebuilt when the sentencepiece file is known.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """[CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model."
                )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: all 0 for a single sequence, 0s then 1s for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the sentencepiece vocab file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
| 656 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase(PipelineTesterMixin, unittest.TestCase):
    """Fast (tiny-model) tests for the Kandinsky 2.2 inpainting pipeline."""

    pipeline_class = KandinskyVaaInpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNetaDConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class __lowerCAmelCase(unittest.TestCase):
    """Integration test: full Kandinsky 2.2 inpainting run against a reference image."""

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 656 | """simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "TsinghuaAI/CPM-Generate": "https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model",
    }
}
class __lowerCAmelCase(PreTrainedTokenizer):
    """
    CPM tokenizer: jieba pre-tokenization plus an XLNet-style SentencePiece
    vocabulary.
    """

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                "You need to install jieba to use CpmTokenizer or CpmTokenizerFast. "
                "See https://pypi.org/project/jieba/ for installation."
            )
        self.jieba = jieba
        self.translator = str.maketrans(" \n", "\u2582\u2583")

    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # The SentencePiece processor is not picklable; rebuild it in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize whitespace, quotes, accents and case before tokenizing."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", "\"").replace("''", "\"")

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize with SentencePiece, splitting trailing digit+comma pieces."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet format: A [SEP] [CLS] (pair: A [SEP] B [SEP] [CLS])."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """XLNet segment ids; the trailing [CLS] gets segment id 2."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy (or serialize) the sentencepiece model into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        # Undo the jieba-era whitespace encoding (see self.translator).
        text = text.replace(" ", "").replace("\u2582", " ").replace("\u2583", "\n")
        return text
| 656 | 1 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
__A : Optional[int] = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    """
    Copy the original HiFi-GAN generator weights from `checkpoint` into
    `hf_model`. Weight norm must be applied while the g/v parametrization is
    loaded, then removed again.

    NOTE(review): the target attribute names (conv_pre/upsampler/resblocks/
    conv_post) follow the SpeechTaHifiGan module layout — confirm against the
    modeling code.
    """
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    """
    Convert an original HiFi-GAN checkpoint plus its stats.npy file into a
    HF SpeechTaHifiGan model, optionally pushing the result to the hub.
    """
    if config_path is not None:
        config = SpeechTaHifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechTaHifiGanConfig()

    model = SpeechTaHifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    # stats.npy holds the (mean, scale) normalization statistics.
    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )

    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
| 656 | """simple docstring"""
def lowercase(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to pick items from *array* (with repetition)
    that sum to *target*, via plain recursion.

    Note: *n* (the length of *array*) is unused here but kept for a signature
    consistent with the iterative variant below.
    """
    # NOTE(review): the dump gave all three parameters the same name
    # (a SyntaxError); names were restored from the call site usage.

    def count_of_possible_combinations(remaining: int) -> int:
        # Overshot this branch — contributes nothing.
        if remaining < 0:
            return 0
        # Exact hit — one valid combination.
        if remaining == 0:
            return 1
        return sum(count_of_possible_combinations(remaining - item) for item in array)

    return count_of_possible_combinations(target)
def lowercase(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to pick items from *array* (with repetition)
    that sum to *target*, memoized top-down with a dp array.

    Note: *n* (the length of *array*) is unused here but kept for a signature
    consistent with the iterative variant below.
    """
    # NOTE(review): the dump gave all three parameters the same name
    # (a SyntaxError) and dropped the `answer`/`dp_array` bindings; both are
    # restored from how the values are read afterwards.

    def count_of_possible_combinations_with_dp_array(target_sum: int, dp_array: list[int]) -> int:
        if target_sum < 0:
            return 0
        if target_sum == 0:
            return 1
        # -1 marks "not computed yet"; anything else is a cached count.
        if dp_array[target_sum] != -1:
            return dp_array[target_sum]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target_sum - item, dp_array)
            for item in array
        )
        dp_array[target_sum] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def lowercase(n: int, array: list[int], target: int) -> int:
    """Count the ordered ways to pick items from *array* (with repetition)
    that sum to *target*, bottom-up.

    Args:
        n: number of usable entries in *array* (normally ``len(array)``).
        array: the candidate item values.
        target: the sum to reach.
    """
    # NOTE(review): the dump gave all three parameters the same name
    # (a SyntaxError) and dropped the `dp_array` bindings; restored from
    # the reads below.
    dp_array = [0] * (target + 1)
    dp_array[0] = 1  # one way to make 0: pick nothing
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # The dump assigned all three demo constants to the same name (`__A`);
    # the names the call below expects are restored here.
    n = 3
    target = 5
    array = [1, 2, 5]
    # NOTE(review): `combination_sum_iv` is not defined under that name in
    # this dump; it presumably refers to one of the counting functions above
    # whose names were garbled — confirm.
    print(combination_sum_iv(n, array, target))
| 656 | 1 |
"""simple docstring"""
def lowercase(input_string: str, key: int) -> str:
    """Encrypt *input_string* with the rail-fence (zigzag) cipher of height *key*.

    Characters are written down a zigzag of *key* rows and read off row by row.

    Raises:
        ValueError: if *key* is zero or negative.
    """
    # NOTE(review): the dump gave both parameters the same name (a
    # SyntaxError) and dropped the local bindings; restored from usage.
    temp_grid: list[list[str]] = [[] for _ in range(key)]
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    # A single rail, or a string shorter than the fence, encrypts to itself.
    if key == 1 or len(input_string) <= key:
        return input_string

    for position, character in enumerate(input_string):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append(character)
    grid = ["".join(row) for row in temp_grid]
    output_string = "".join(grid)

    return output_string
def lowercase(input_string: str, key: int) -> str:
    """Decrypt a rail-fence (zigzag) ciphertext *input_string* of height *key*.

    Rebuilds the zigzag template, fills each row from the ciphertext, then
    reads the characters back in zigzag order.

    Raises:
        ValueError: if *key* is zero or negative.
    """
    # NOTE(review): the dump gave both parameters the same name (a
    # SyntaxError) and dropped the local bindings; restored from usage.
    grid = []
    lowest = key - 1

    if key <= 0:
        raise ValueError("Height of grid can't be 0 or negative")
    if key == 1:
        return input_string

    temp_grid: list[list[str]] = [[] for _ in range(key)]  # generates template
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        temp_grid[num].append("*")

    counter = 0
    for row in temp_grid:  # fills in the characters
        splice = input_string[counter : counter + len(row)]
        grid.append(list(splice))
        counter += len(row)

    output_string = ""  # reads as zigzag
    for position in range(len(input_string)):
        num = position % (lowest * 2)  # puts it in bounds
        num = min(num, lowest * 2 - num)  # creates zigzag pattern
        output_string += grid[num][0]
        grid[num].pop(0)
    return output_string
def lowercase(input_string: str) -> dict:
    """Brute-force a rail-fence ciphertext: decrypt with every plausible key.

    Returns:
        A mapping from each tried key (1 .. len(input_string) - 1) to the
        corresponding decryption attempt.
    """
    # NOTE(review): the dump dropped the `results` bindings; restored from
    # the return statement. `decrypt` is not defined under that name in this
    # dump — it presumably refers to the rail-fence decrypt function above
    # whose name was garbled; confirm.
    results = {}
    for key_guess in range(1, len(input_string)):  # tries every key
        results[key_guess] = decrypt(input_string, key_guess)
    return results
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 656 | """simple docstring"""
import math
import tensorflow as tf
from packaging import version
def lowercase(UpperCamelCase):
    """Exact Gaussian Error Linear Unit: gelu(x) = x * Phi(x), where Phi is the
    standard normal CDF computed via erf.
    """
    # NOTE(review): the dump bound both locals to `A__` while the following
    # lines read `x`/`cdf`; the bindings are restored here.
    x = tf.convert_to_tensor(UpperCamelCase)
    # Phi(x) = 0.5 * (1 + erf(x / sqrt(2))); the cast keeps dtypes consistent.
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))
    return x * cdf
def lowercase(UpperCamelCase):
    """Tanh approximation of gelu:
    0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).
    """
    # NOTE(review): the dump bound the locals to `A__` while the final
    # expression reads `x`/`pi`/`coeff`; the bindings are restored here.
    x = tf.convert_to_tensor(UpperCamelCase)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.04_47_15, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))
    return x * cdf
def lowercase(UpperCamelCase):
    """Mish activation: x * tanh(softplus(x))."""
    # NOTE(review): the dump bound the converted tensor to `A__` while the
    # return expression reads `x`; the binding is restored here.
    x = tf.convert_to_tensor(UpperCamelCase)
    return x * tf.tanh(tf.math.softplus(x))
def lowercase(UpperCamelCase):
    """Fast tanh approximation of gelu:
    0.5 * x * (1 + tanh(x * 0.7978845608 * (1 + 0.044715 * x * x))),
    where 0.7978845608 ~= sqrt(2/pi).
    """
    # NOTE(review): the dump bound both constants to `A__` and referenced a
    # single `coeffa` twice; the placement below follows the algebraic
    # identity sqrt(2/pi)*(x + 0.044715*x^3) = x*0.7978845608*(1 + 0.044715*x^2).
    x = tf.convert_to_tensor(UpperCamelCase)
    coeff_inner = tf.cast(0.04_47_15, x.dtype)
    coeff_outer = tf.cast(0.79_78_84_56_08, x.dtype)
    return 0.5 * x * (1.0 + tf.tanh(x * coeff_outer * (1.0 + coeff_inner * x * x)))
def lowercase(UpperCamelCase):
    """Quick gelu: x * sigmoid(1.702 * x), a cheap sigmoid approximation."""
    # NOTE(review): the dump bound the locals to `A__` while the return
    # expression reads `x`/`coeff`; the bindings are restored here.
    x = tf.convert_to_tensor(UpperCamelCase)
    coeff = tf.cast(1.7_02, x.dtype)
    return x * tf.math.sigmoid(coeff * x)
def lowercase ( UpperCamelCase : Tuple ):
    """Gelu-10: gelu with outputs clipped to the range [-10, 10].

    NOTE(review): `_gelu` is not defined under that name in this dump; it
    presumably refers to the erf-based gelu implementation above whose name
    was garbled — confirm.
    """
    return tf.clip_by_value(_gelu(UpperCamelCase ) , -10 , 10 )
def lowercase(x, axis=-1):
    """Gated Linear Unit: split *x* into halves (a, b) along *axis* and
    return a * sigmoid(b).
    """
    # NOTE(review): the dump gave both parameters the same name (a
    # SyntaxError) and dropped the tuple bindings; restored from the
    # `a * sigmoid(...)` usage.
    a, b = tf.split(x, 2, axis=axis)
    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(UpperCamelCase):
        """Tanh-approximated gelu via Keras (available from TF 2.4)."""
        # NOTE(review): the dump passed the input tensor as `approximate=`;
        # the flag is a boolean selecting the tanh approximation.
        return tf.keras.activations.gelu(UpperCamelCase, approximate=True)

    # From TF 2.4, Keras ships gelu natively; use it plus the approximate wrapper.
    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    # Older TF: fall back to the manual implementations.
    # NOTE(review): `_gelu`/`_gelu_new` are not defined under these names in
    # this dump; they presumably refer to the erf- and tanh-based gelu
    # functions defined above whose names were garbled — confirm.
    gelu = _gelu
    gelu_new = _gelu_new
# Mapping from activation-function names to their TF implementations.
# The dump bound this dict to `__A` while the lookup function below reads
# `ACTaFN`; the name is restored here.
# NOTE(review): several value names (gelu_aa, gelu_fast, glu, mish,
# quick_gelu) are not defined under these identifiers in this dump — they
# presumably refer to the activation functions defined above whose names
# were garbled; confirm before use.
ACTaFN = {
    "gelu": gelu,
    "gelu_10": gelu_aa,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}
def lowercase(activation_string):
    """Return the TF activation function registered under *activation_string*.

    Raises:
        KeyError: if the name is not present in the ACT2FN mapping.
    """
    # NOTE(review): the dump named the parameter `UpperCamelCase` while the
    # body reads `activation_string` (a NameError); the name is restored.
    if activation_string in ACTaFN:
        return ACTaFN[activation_string]
    else:
        raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''')
| 656 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.