code stringlengths 82 54.1k | code_codestyle int64 0 699 | style_context stringlengths 111 35.6k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
import os
from datetime import datetime as dt
from github import Github
a =[
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def SCREAMING_SNAKE_CASE__ ( ) -> str:
__lowerCamelCase : List[Any] = Github(os.environ['GITHUB_TOKEN'] )
__lowerCamelCase : int = g.get_repo('huggingface/diffusers' )
__lowerCamelCase : int = repo.get_issues(state='open' )
for issue in open_issues:
__lowerCamelCase : Dict = sorted(issue.get_comments() , key=lambda lowerCamelCase__ : i.created_at , reverse=lowerCamelCase__ )
__lowerCamelCase : Dict = comments[0] if len(lowerCamelCase__ ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 2_3
and (dt.utcnow() - issue.created_at).days >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 652 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
__lowerCamelCase : str = parent
__lowerCamelCase : Union[str, Any] = batch_size
__lowerCamelCase : int = num_channels
__lowerCamelCase : Dict = min_resolution
__lowerCamelCase : Tuple = max_resolution
__lowerCamelCase : Dict = do_resize
__lowerCamelCase : List[Any] = size
__lowerCamelCase : Tuple = do_normalize
__lowerCamelCase : Any = image_mean
__lowerCamelCase : List[str] = image_std
__lowerCamelCase : List[Any] = do_rescale
__lowerCamelCase : str = rescale_factor
__lowerCamelCase : Tuple = do_pad
def lowerCAmelCase ( self : Dict):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False):
if not batched:
__lowerCamelCase : Optional[Any] = image_inputs[0]
if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image):
__lowerCamelCase , __lowerCamelCase : Any = image.size
else:
__lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2]
if w < h:
__lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w)
__lowerCamelCase : Tuple = self.size['shortest_edge']
elif w > h:
__lowerCamelCase : Union[str, Any] = self.size['shortest_edge']
__lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h)
else:
__lowerCamelCase : List[Any] = self.size['shortest_edge']
__lowerCamelCase : Optional[int] = self.size['shortest_edge']
else:
__lowerCamelCase : List[str] = []
for image in image_inputs:
__lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image])
expected_values.append((expected_height, expected_width))
__lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0]
__lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1]
return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : List[str] = DetaImageProcessingTester(self)
@property
def lowerCAmelCase ( self : Any):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad'))
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))
def lowerCAmelCase ( self : str):
__lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Any):
pass
def lowerCAmelCase ( self : List[str]):
# Initialize image_processing
__lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
# Test not batched input
__lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCAmelCase ( self : str):
# Initialize image_processing
__lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
# Test not batched input
__lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def lowerCAmelCase ( self : int):
# Initialize image_processing
__lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
# Test not batched input
__lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
__lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def lowerCAmelCase ( self : Optional[Any]):
# prepare image and target
__lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
__lowerCamelCase : List[str] = json.loads(f.read())
__lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
# encode them
__lowerCamelCase : Optional[int] = DetaImageProcessor()
__lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
# verify pixel values
__lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
# verify area
__lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
# verify boxes
__lowerCamelCase : int = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# verify image_id
__lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
# verify is_crowd
__lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
# verify class_labels
__lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
# verify orig_size
__lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
# verify size
__lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
@slow
def lowerCAmelCase ( self : str):
# prepare image, target and masks_path
__lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
__lowerCamelCase : Tuple = json.loads(f.read())
__lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
__lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
# encode them
__lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
__lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
# verify pixel values
__lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
# verify area
__lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
# verify boxes
__lowerCamelCase : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# verify image_id
__lowerCamelCase : int = torch.tensor([3_9_7_6_9])
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
# verify is_crowd
__lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
# verify class_labels
__lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
# verify masks
__lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
# verify orig_size
__lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
# verify size
__lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a ={
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a =[
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
_UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
_UpperCAmelCase : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
_UpperCAmelCase : Tuple = False
@property
def lowerCAmelCase ( self : Tuple):
return 3_2
@property
def lowerCAmelCase ( self : List[Any]):
return 3_2
@property
def lowerCAmelCase ( self : str):
return self.time_input_dim
@property
def lowerCAmelCase ( self : List[str]):
return self.time_input_dim * 4
@property
def lowerCAmelCase ( self : List[str]):
return 1_0_0
@property
def lowerCAmelCase ( self : Dict):
torch.manual_seed(0)
__lowerCamelCase : Optional[Any] = {
'in_channels': 8,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
return model
@property
def lowerCAmelCase ( self : Union[str, Any]):
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase ( self : Optional[Any]):
torch.manual_seed(0)
__lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
return model
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Tuple = self.dummy_unet
__lowerCamelCase : List[Any] = self.dummy_movq
__lowerCamelCase : str = DDIMScheduler(
num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : Dict = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
__lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
SCREAMING_SNAKE_CASE__)
# create hint
__lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
__lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
else:
__lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = 'cpu'
__lowerCamelCase : Tuple = self.get_dummy_components()
__lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : int = output.images
__lowerCamelCase : Tuple = pipe(
**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
__lowerCamelCase : Dict = image[0, -3:, -3:, -1]
__lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__lowerCamelCase : List[str] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
assert (
np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
def lowerCAmelCase ( self : int):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase ( self : int):
__lowerCamelCase : List[Any] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
__lowerCamelCase : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png')
__lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
__lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
__lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
pipe_prior.to(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
__lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = 'A robot, 4k photo'
__lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
__lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
__lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
__lowerCamelCase : Any = pipeline(
image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
__lowerCamelCase : List[Any] = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 652 | 1 |
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
if (
not isinstance(lowerCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * power_factor
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> float:
if (
not isinstance(lowerCamelCase__ , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError('power_factor must be a valid float value between -1 and 1.' )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A_ :
_UpperCAmelCase : int = XGLMConfig
_UpperCAmelCase : List[Any] = {}
_UpperCAmelCase : Tuple = '''gelu'''
def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,):
__lowerCamelCase : List[str] = parent
__lowerCamelCase : List[str] = batch_size
__lowerCamelCase : str = seq_length
__lowerCamelCase : Optional[Any] = is_training
__lowerCamelCase : Any = use_input_mask
__lowerCamelCase : str = use_labels
__lowerCamelCase : Any = vocab_size
__lowerCamelCase : Dict = d_model
__lowerCamelCase : int = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : List[str] = ffn_dim
__lowerCamelCase : Optional[Any] = activation_function
__lowerCamelCase : Tuple = activation_dropout
__lowerCamelCase : Union[str, Any] = attention_dropout
__lowerCamelCase : List[str] = max_position_embeddings
__lowerCamelCase : List[Any] = initializer_range
__lowerCamelCase : Any = None
__lowerCamelCase : List[str] = 0
__lowerCamelCase : List[str] = 2
__lowerCamelCase : Dict = 1
def lowerCAmelCase ( self : Any):
return XGLMConfig.from_pretrained('facebook/xglm-564M')
def lowerCAmelCase ( self : str):
__lowerCamelCase : Any = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3)
__lowerCamelCase : Dict = None
if self.use_input_mask:
__lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
__lowerCamelCase : int = self.get_config()
__lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowerCAmelCase ( self : List[Any]):
return XGLMConfig(
vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,)
def lowerCAmelCase ( self : int):
__lowerCamelCase : int = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : Any = config_and_inputs
__lowerCamelCase : str = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
_UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
_UpperCAmelCase : str = (
{'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
)
_UpperCAmelCase : Tuple = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Union[str, Any] = False
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Tuple = TFXGLMModelTester(self)
__lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7)
def lowerCAmelCase ( self : List[Any]):
self.config_tester.run_common_tests()
@slow
def lowerCAmelCase ( self : str):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__)
self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
def lowerCAmelCase ( self : Union[str, Any]):
super().test_resize_token_embeddings()
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration tests for TF XGLM generation against the
    facebook/xglm-564M checkpoint: greedy decoding, seeded sampling pinned to
    CPU, and batched generation with left padding.

    NOTE(review): this block appears machine-mangled. Results are assigned to
    a reused `__lowerCamelCase` local but read back under their intended
    names (`model`, `output_ids`, `tokenizer`, `verify_outputs`, ...), which
    would raise NameError at runtime; the test methods are all named
    `lowerCAmelCase`, so unittest would not collect them; and `tf.intaa` is a
    garbled dtype name. Kept byte-identical pending restoration from the
    upstream source.
    """

    @slow
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True):
        # Greedy decoding (num_beams=1, do_sample=False) must reproduce the
        # hard-coded reference token ids below.
        __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__)

    @slow
    def lowerCAmelCase ( self : List[str]):
        # Seeded sampling; pinned to CPU so the expected string is
        # device-independent.
        __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf')
        __lowerCamelCase : List[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0])
        __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    @slow
    def lowerCAmelCase ( self : Dict):
        # Batched, left-padded generation must match the per-sentence
        # (non-padded) generations.
        __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = 'left'
        # use different length sentences to test batching
        __lowerCamelCase : List[str] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = inputs['input_ids']
        __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2)
        __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
| 652 | 1 |
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return the product u * (u - 1) * ... * (u - p + 1).

    This is the coefficient of the p-th forward difference in Newton's
    forward interpolation formula (see `main` below, which calls `ucal`).

    Fixes vs. the mangled original: both parameters were declared under the
    same name (a SyntaxError) and the running product was never carried
    between iterations.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Interactively interpolate a value with Newton's forward differences.

    Reads from stdin: the number of sample points n, the list of x
    parameters, the n corresponding y values, and the value to interpolate;
    prints the interpolated result. Relies on the module-level `ucal` helper
    (the call was already present in the original).

    Fixes vs. the mangled original: every local was assigned to a throwaway
    `__lowerCamelCase` name and read back as an undefined identifier, which
    would raise NameError on the first loop.
    """
    n = int(input('enter the numbers of values: '))
    # y becomes the forward-difference table; column 0 holds the samples.
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('enter the values of parameters in a list: ')
    x = list(map(int, input().split()))
    print('enter the values of corresponding parameters: ')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('enter the value to interpolate: '))
    # Normalised distance of `value` from x[0] in units of the uniform step.
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(F"the value at {value} is {summ}")


if __name__ == "__main__":
    main()
| 652 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 652 | 1 |
def SCREAMING_SNAKE_CASE__(txt: str) -> list:
    """Return every variant of `txt` with exactly one alphabetic character
    upper-cased (non-alphabetic positions are skipped).

    Fix vs. the mangled original: the body read `txt` while the parameter
    had a different mangled name, so any call raised NameError.
    """
    return [
        txt[:a] + txt[a].upper() + txt[a + 1 :]
        for a in range(len(txt))
        if txt[a].isalpha()
    ]


if __name__ == "__main__":
    __import__("""doctest""").testmod()
| 652 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Module logger plus the docstring constants consumed by the
# `add_code_sample_docstrings` decorators further down in this file
# (`_CHECKPOINT_FOR_DOC`, `_CONFIG_FOR_DOC`, ...). The mangled source bound
# every one of them to the same name `a`, leaving the decorator references
# undefined.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""

# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Convolution + batch norm + activation: the basic RegNet building block.

    Fixes vs. the mangled source: the class is named `TFRegNetConvLayer`
    because that is the name the rest of this file instantiates; the
    duplicated constructor parameter names (a SyntaxError) are restored; the
    sublayers are stored on `self` (they were assigned to a discarded local)
    so `call` can use them; and the forward method is named `call` so Keras
    dispatches to it. `ConvaD`/`ZeroPaddingaD` were garbled 2D layer names.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        # Explicit symmetric padding so the convolution itself can use VALID.
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding='VALID',
            groups=groups,
            use_bias=False,
            name='convolution',
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        return self.activation(hidden_state)
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: a single strided conv layer that embeds pixel values.

    Fixes vs. the mangled source: restored the class name used by
    `TFRegNetMainLayer`, stored `num_channels`/`embedder` on `self`, and
    named the forward method `call` for Keras dispatch.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name='embedder',
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        # Channel-count mismatches can only be detected eagerly.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        return self.embedder(pixel_values)
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 strided convolution + batch norm projecting residual inputs.

    Fixes vs. the mangled source: restored the class name used by the X/Y
    residual layers, stored the sublayers on `self`, de-duplicated the
    constructor parameter names, and named the forward method `call`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='convolution')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5, momentum=0.9, name='normalization')

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation block: global pool, bottleneck, sigmoid gate.

    Fixes vs. the mangled source: restored the class name used by
    `TFRegNetYLayer`, stored `pooler`/`attention` on `self`, de-duplicated
    the constructor parameter names, threaded the pooled tensor through the
    attention convs, and named the forward method `call`.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='relu', name='attention.0'),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='sigmoid', name='attention.2'),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Channel-wise gate broadcast over the spatial dimensions.
        return hidden_state * pooled
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convolutions.

    Fixes vs. the mangled source: restored the class name referenced by
    `TFRegNetStage`, de-duplicated the constructor parameter names, stored
    `shortcut`/`layers`/`activation` on `self`, and named the forward method
    `call`.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        # Project the residual only when the shape actually changes.
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.2'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        return self.activation(hidden_state)
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y residual layer: an X layer plus squeeze-and-excitation.

    Fixes vs. the mangled source: restored the class name referenced by
    `TFRegNetStage`, de-duplicated the constructor parameter names, stored
    `shortcut`/`layers`/`activation` on `self`, and named the forward method
    `call`.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='shortcut')
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear', name='shortcut')
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='layer.0'),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='layer.1'),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name='layer.2'),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='layer.3'),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        return self.activation(hidden_state)
class TFRegNetStage(tf.keras.layers.Layer):
    """One RegNet stage: `depth` identical layers, downsampling in the first.

    Fixes vs. the mangled source: restored the class name referenced by
    `TFRegNetEncoder`, de-duplicated the constructor parameter names, stored
    the layer list on `self`, and named the forward method `call`.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2,
                 depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name='layers.0'),
            *[layer(config, out_channels, out_channels, name=F"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects per-stage hidden states.

    Fixes vs. the mangled source: restored the class name used by
    `TFRegNetMainLayer`, bound the stage list to `self.stages` (the original
    appended to an attribute it never created), de-duplicated the `call`
    parameter names, and named the forward method `call`.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name='stages.0',
            ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=F"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Hidden states are collected *before* each stage, plus the final one.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; shared by the model and the classifier.

    Fixes vs. the mangled source: restored the class name used by the model
    heads below, renamed the class attribute to `config_class` (required by
    `@keras_serializable`), de-duplicated the `call` parameter names (a
    SyntaxError as written), stored the sublayers on `self`, and named the
    forward method `call`.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name='embedder')
        self.encoder = TFRegNetEncoder(config, name='encoder')
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='pooler')

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
# NOTE(review): machine-mangled. The base-class placeholder
# `SCREAMING_SNAKE_CASE` is undefined here (upstream this class subclasses
# `TFPreTrainedModel`), the three `_UpperCAmelCase` attributes correspond to
# `config_class`, `base_model_prefix` and `main_input_name`, the property
# should be named `input_signature`, and `tf.floataa` is a garbled dtype.
# Kept byte-identical pending restoration.
class A_ ( SCREAMING_SNAKE_CASE ):
    _UpperCAmelCase : Any = RegNetConfig
    _UpperCAmelCase : Optional[int] = '''regnet'''
    _UpperCAmelCase : List[Any] = '''pixel_values'''

    @property
    def lowerCAmelCase ( self : int):
        # Serving signature: NCHW float input, 224x224 spatial size.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.floataa)}
a =r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
# NOTE(review): machine-mangled. The decorator's second argument and the base
# class are the undefined placeholder `SCREAMING_SNAKE_CASE` (upstream: the
# REGNET_START_DOCSTRING raw string and TFRegNetPreTrainedModel), and both
# `__init__` and the forward method declare several parameters under the same
# `SCREAMING_SNAKE_CASE__` name, which is a SyntaxError as written. Kept
# byte-identical pending restoration.
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : Tuple):
        super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
        # Backbone shared with the classification head below.
        __lowerCamelCase : List[str] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : tf.Tensor ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : int=False ,):
        # Resolve output flags from the config when not given explicitly.
        __lowerCamelCase : List[Any] = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Tuple = self.regnet(
            pixel_values=SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__ ,)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
# NOTE(review): machine-mangled like the class above -- undefined
# `SCREAMING_SNAKE_CASE` bases (upstream: TFRegNetPreTrainedModel and
# TFSequenceClassificationLoss), duplicated parameter names in the
# signatures (a SyntaxError), and results assigned to a discarded
# `__lowerCamelCase` local but read back under their intended names
# (`outputs`, `pooled_output`, `logits`, `loss`). Kept byte-identical
# pending restoration.
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : RegNetConfig ,*SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : str):
        super().__init__(SCREAMING_SNAKE_CASE__ ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = config.num_labels
        __lowerCamelCase : Union[str, Any] = TFRegNetMainLayer(SCREAMING_SNAKE_CASE__ ,name='regnet')
        # classification head
        __lowerCamelCase : Optional[Any] = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels ,name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : tf.Tensor = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : bool = None ,SCREAMING_SNAKE_CASE__ : Any=False ,):
        __lowerCamelCase : Any = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : str = self.regnet(
            SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__ ,training=SCREAMING_SNAKE_CASE__)
        # Pooled features feed the Flatten + Dense classification head.
        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase : Optional[Any] = self.classifier[0](SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = self.classifier[1](SCREAMING_SNAKE_CASE__)
        # Loss is only computed when labels are provided.
        __lowerCamelCase : List[str] = None if labels is None else self.hf_compute_loss(labels=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__)
        if not return_dict:
            __lowerCamelCase : Union[str, Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states)
| 652 | 1 |
import math
def check_partition_perfect(positive_integer: int) -> bool:
    """Return True when `positive_integer` is a "perfect" partition value,
    i.e. sqrt(4*k + 1)/2 + 1/2 is an exact power of two.

    Fixes vs. the mangled original: `math.loga` does not exist (it was
    `math.log2`), the integrality check must compare the exponent with
    itself, and the function is named as the `solution` caller expects.
    """
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    """Return the first partition value at which the running proportion of
    perfect partitions drops below `max_proportion`.

    Candidates are k = (i**2 - 1) / 4 for integers i >= 3; only integral
    candidates count as partitions.

    Fixes vs. the mangled original: locals were discarded into a reused
    name and `return int(...)` returned the *proportion* argument instead of
    the partition value. The perfection check is duplicated as a private
    helper so this function does not depend on the broken module-level
    binding.
    """

    def _is_perfect(candidate: int) -> bool:
        # Same predicate as check_partition_perfect above.
        exponent = math.log2(math.sqrt(4 * candidate + 1) / 2 + 1 / 2)
        return exponent == int(exponent)

    perfect_partitions = 0
    total_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if _is_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0 and perfect_partitions / total_partitions < max_proportion:
            return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(F"""{solution() = }""")
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a =object()
# For specifying empty leaf dict `{}`
a =object()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ):
__lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )]
if matches and all(lowerCamelCase__ ):
return True
return False
def _replacement_rules(rules):
    """Build a `replace(key, val)` closure over `rules`.

    `replace` returns the replacement of the first (pattern-tuple, value)
    rule whose patterns match `key` (via the module-level `_match`), or
    `val` unchanged when nothing matches. Fixes vs. the mangled original:
    the inner function declared both parameters under one name (a
    SyntaxError) and passed garbled arguments to the matcher.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Partition rules mapping GPT-style parameter paths to PartitionSpecs.

    `None` replicates an axis; 'mp' shards it across the model-parallel mesh
    dimension. Fixes vs. the mangled original: the undefined name that stood
    in for the replicated axis is restored to `None`, and the function is
    named as the set-partitions entry point calls it.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # atention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def SCREAMING_SNAKE_CASE__(in_dict):
    """Assign a PartitionSpec to every parameter path in `in_dict`.

    Every flattened key must be matched by some rule from
    `_get_partition_rules`; otherwise the `_unmatched` sentinel survives and
    the assertion fires. Fixes vs. the mangled original: the intermediate
    dict was bound to a discarded local but read back as `initd`, raising
    NameError.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 1 |
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
    'files' , [
        ['full:README.md', 'dataset_infos.json'],
        ['empty:README.md', 'dataset_infos.json'],
        ['dataset_infos.json'],
        ['full:README.md'],
    ] , )
def SCREAMING_SNAKE_CASE__(tmp_path_factory, files):
    """DatasetInfosDict.from_directory must read dataset_size from README.md
    YAML front matter and/or the legacy dataset_infos.json, whichever exists.

    Fixes vs. the mangled original: the fixture/parametrize argument names
    are restored (pytest injects them by name) and locals are no longer
    discarded into a reused name and read back as undefined identifiers.
    """
    dataset_infos_dir = tmp_path_factory.mktemp('dset_infos_dir')
    if "full:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('---\ndataset_info:\n dataset_size: 42\n---' )
    if "empty:README.md" in files:
        with open(dataset_infos_dir / 'README.md' , 'w' ) as f:
            f.write('' )
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f:
            f.write('{"default": {"dataset_size": 42}}' )
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
    'dataset_info' , [
        DatasetInfo(),
        DatasetInfo(
            description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ),
    ] , )
def SCREAMING_SNAKE_CASE__(tmp_path, dataset_info):
    """Writing a DatasetInfo to a directory and reloading it must be a
    lossless round trip and must create dataset_info.json.

    Fixes vs. the mangled original: both parameters were declared under one
    name (a SyntaxError; pytest injects `tmp_path` and `dataset_info` by
    name) and locals were discarded then read back as undefined names.
    """
    tmp_dir = str(tmp_path)
    dataset_info.write_to_directory(tmp_dir)
    reloaded = DatasetInfo.from_directory(tmp_dir)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_dir , 'dataset_info.json' ) )
def SCREAMING_SNAKE_CASE__() -> int:
    """A fully populated DatasetInfo must round-trip through its YAML dict:
    exactly the `_INCLUDED_INFO_IN_YAML` keys, all YAML-serializable.

    Fixes vs. the mangled original: locals were discarded into a reused name
    and read back as undefined identifiers (`dataset_info`,
    `dataset_info_yaml_dict`, ...), and the first assertion referenced a
    nonexistent parameter.
    """
    dataset_info = DatasetInfo(
        description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) )
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded
def SCREAMING_SNAKE_CASE__() -> Any:
    """An empty DatasetInfo must serialize to an empty YAML dict.

    Fixes vs. the mangled original: locals were discarded into a reused name
    and read back as undefined identifiers.
    """
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
    'dataset_infos_dict' , [
        DatasetInfosDict(),
        DatasetInfosDict({'default': DatasetInfo()} ),
        DatasetInfosDict({'my_config_name': DatasetInfo()} ),
        DatasetInfosDict(
            {
                'default': DatasetInfo(
                    description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , )
            } ),
        DatasetInfosDict(
            {
                'v1': DatasetInfo(dataset_size=42 ),
                'v2': DatasetInfo(dataset_size=1337 ),
            } ),
    ] , )
def SCREAMING_SNAKE_CASE__(tmp_path, dataset_infos_dict):
    """Writing a DatasetInfosDict and reloading it must recover everything
    the YAML representation keeps, and must create README.md when non-empty.

    Fixes vs. the mangled original: both parameters were declared under one
    name (a SyntaxError; pytest injects them by name), and the in-loop
    normalisation assigned to a discarded local instead of mutating
    `dataset_info.config_name` and the dict entry.
    """
    tmp_dir = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_dir)
    reloaded = DatasetInfosDict.from_directory(tmp_dir)
    # the config_name of the dataset_infos_dict take over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_dir , 'README.md' ) )
| 652 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below `n` (requires n >= 3) using an
    odd-only sieve of Eratosthenes.

    Fixes vs. the mangled original: the sieve array and its boundary entries
    were assigned to a discarded local and then indexed as the undefined
    name `is_prime`; the name `prime_sieve` is what `solution` below calls.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(n**0.5 + 1), 2):
        # Mark multiples of i from 2*i upward; even indices are never read
        # back, so only the odd multiples matter.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999966663333) -> int:
    """Sum the "semidivisible" numbers up to `limit`.

    For each pair of consecutive primes (p, q) with p**2 <= limit, numbers in
    (p**2, q**2) divisible by p or by q are accumulated, and numbers
    divisible by both are subtracted twice since each pass added them once.
    Depends on the module-level `prime_sieve` helper.

    Fixes vs. the mangled original: every local was assigned to a discarded
    name and read back as an undefined identifier, and the sieve was invoked
    with the wrong bound instead of sqrt(limit) + 100.
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum


if __name__ == "__main__":
    print(solution())
| 652 | 1 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A_ ( unittest.TestCase ):
    """SamProcessor tests (PyTorch path): save/load round-trip, parity with
    the bare SamImageProcessor, and mask post-processing.

    NOTE(review): this chunk looks machine-rewritten. Every method shares the
    name `lowerCAmelCase` (so only the last definition survives on the class),
    every local is rebound to the single name `__lowerCamelCase`, and values
    are then read through the undefined module-level name
    `SCREAMING_SNAKE_CASE__`. `np.uinta` is presumably `np.uint8` — confirm
    against the upstream transformers SAM processor tests before relying on
    any of this.
    """
    def lowerCAmelCase ( self : Dict):
        # setUp-style fixture: persist a default SamProcessor into a temp dir.
        __lowerCamelCase : List[Any] = tempfile.mkdtemp()
        __lowerCamelCase : Any = SamImageProcessor()
        __lowerCamelCase : Tuple = SamProcessor(SCREAMING_SNAKE_CASE__)
        processor.save_pretrained(self.tmpdirname)
    def lowerCAmelCase ( self : str ,**SCREAMING_SNAKE_CASE__ : Dict):
        # Reload the image processor saved by the fixture above.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__).image_processor
    def lowerCAmelCase ( self : List[Any]):
        # tearDown-style fixture: remove the temp dir.
        shutil.rmtree(self.tmpdirname)
    def lowerCAmelCase ( self : Dict):
        # Build one random RGB PIL image (H=30, W=400) as test input.
        __lowerCamelCase : Tuple = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)]
        __lowerCamelCase : Dict = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ ,0 ,-1)) for x in image_inputs]
        return image_inputs
    def lowerCAmelCase ( self : str):
        # Save/load round-trip with overridden kwargs keeps the processor config.
        __lowerCamelCase : Any = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        __lowerCamelCase : List[Any] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        __lowerCamelCase : Dict = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Tuple):
        # Processor(images=...) output should match the bare image processor.
        __lowerCamelCase : Optional[Any] = self.get_image_processor()
        __lowerCamelCase : List[str] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
        __lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        __lowerCamelCase : Dict = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes') # pop original_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2)
    @require_torch
    def lowerCAmelCase ( self : int):
        # post_process_masks should upscale low-res masks to the original
        # image sizes, accept lists / torch tensors / numpy arrays, and
        # reject ragged `original_sizes`.
        __lowerCamelCase : Optional[Any] = self.get_image_processor()
        __lowerCamelCase : str = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [torch.ones((1, 3, 5, 5))]
        __lowerCamelCase : Any = [[1_7_6_4, 2_6_4_6]]
        __lowerCamelCase : Dict = [[6_8_3, 1_0_2_4]]
        __lowerCamelCase : int = processor.post_process_masks(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        __lowerCamelCase : Union[str, Any] = processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,torch.tensor(SCREAMING_SNAKE_CASE__) ,torch.tensor(SCREAMING_SNAKE_CASE__))
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        # should also work with np
        __lowerCamelCase : Any = [np.ones((1, 3, 5, 5))]
        __lowerCamelCase : str = processor.post_process_masks(SCREAMING_SNAKE_CASE__ ,np.array(SCREAMING_SNAKE_CASE__) ,np.array(SCREAMING_SNAKE_CASE__))
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        __lowerCamelCase : Dict = [[1, 0], [0, 1]]
        with self.assertRaises(SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : List[str] = processor.post_process_masks(SCREAMING_SNAKE_CASE__ ,np.array(SCREAMING_SNAKE_CASE__) ,np.array(SCREAMING_SNAKE_CASE__))
@require_vision
@require_tf
class A_ ( unittest.TestCase ):
    """SamProcessor tests (TensorFlow path): same structure as the torch
    class above, with `return_tensors='tf'` for mask post-processing.

    NOTE(review): machine-rewritten like the class above — all methods named
    `lowerCAmelCase` (only the last survives), locals collapsed to
    `__lowerCamelCase`, reads via the undefined `SCREAMING_SNAKE_CASE__`,
    and `np.uinta` is presumably `np.uint8`.
    """
    def lowerCAmelCase ( self : List[Any]):
        # setUp-style fixture: persist a default SamProcessor into a temp dir.
        __lowerCamelCase : int = tempfile.mkdtemp()
        __lowerCamelCase : Any = SamImageProcessor()
        __lowerCamelCase : Optional[int] = SamProcessor(SCREAMING_SNAKE_CASE__)
        processor.save_pretrained(self.tmpdirname)
    def lowerCAmelCase ( self : Any ,**SCREAMING_SNAKE_CASE__ : Any):
        # Reload the image processor saved by the fixture above.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__).image_processor
    def lowerCAmelCase ( self : Optional[Any]):
        # tearDown-style fixture: remove the temp dir.
        shutil.rmtree(self.tmpdirname)
    def lowerCAmelCase ( self : List[Any]):
        # Build one random RGB PIL image (H=30, W=400) as test input.
        __lowerCamelCase : int = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)]
        __lowerCamelCase : str = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ ,0 ,-1)) for x in image_inputs]
        return image_inputs
    def lowerCAmelCase ( self : int):
        # Save/load round-trip with overridden kwargs keeps the processor config.
        __lowerCamelCase : Optional[int] = SamProcessor(image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)
        __lowerCamelCase : List[str] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        __lowerCamelCase : int = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[str]):
        # Processor(images=...) output should match the bare image processor.
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : Dict = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = self.prepare_image_inputs()
        __lowerCamelCase : List[Any] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        __lowerCamelCase : List[Any] = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        input_feat_extract.pop('original_sizes') # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes') # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2)
    @require_tf
    def lowerCAmelCase ( self : int):
        # post_process_masks with TF tensors: lists / tf tensors / numpy all
        # accepted; ragged `original_sizes` must raise InvalidArgumentError.
        __lowerCamelCase : Any = self.get_image_processor()
        __lowerCamelCase : List[str] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [tf.ones((1, 3, 5, 5))]
        __lowerCamelCase : List[str] = [[1_7_6_4, 2_6_4_6]]
        __lowerCamelCase : Optional[int] = [[6_8_3, 1_0_2_4]]
        __lowerCamelCase : List[str] = processor.post_process_masks(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        __lowerCamelCase : Optional[int] = processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,tf.convert_to_tensor(SCREAMING_SNAKE_CASE__) ,tf.convert_to_tensor(SCREAMING_SNAKE_CASE__) ,return_tensors='tf' ,)
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        # should also work with np
        __lowerCamelCase : List[Any] = [np.ones((1, 3, 5, 5))]
        __lowerCamelCase : Dict = processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,np.array(SCREAMING_SNAKE_CASE__) ,np.array(SCREAMING_SNAKE_CASE__) ,return_tensors='tf')
        self.assertEqual(masks[0].shape ,(1, 3, 1_7_6_4, 2_6_4_6))
        __lowerCamelCase : str = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError):
            __lowerCamelCase : Union[str, Any] = processor.post_process_masks(
                SCREAMING_SNAKE_CASE__ ,np.array(SCREAMING_SNAKE_CASE__) ,np.array(SCREAMING_SNAKE_CASE__) ,return_tensors='tf')
@require_vision
@require_torchvision
class A_ ( unittest.TestCase ):
    """PT/TF cross-equivalence tests for SamProcessor: the torch and
    tensorflow code paths must produce numerically identical outputs.

    NOTE(review): machine-rewritten like the classes above — methods share
    the name `lowerCAmelCase`, locals collapse to `__lowerCamelCase`, values
    are read via the undefined `SCREAMING_SNAKE_CASE__`, and `np.uinta` /
    `np.floataa` are presumably `np.uint8` / `np.float32`.
    """
    def lowerCAmelCase ( self : Tuple):
        # setUp-style fixture: persist a default SamProcessor into a temp dir.
        __lowerCamelCase : str = tempfile.mkdtemp()
        __lowerCamelCase : Any = SamImageProcessor()
        __lowerCamelCase : str = SamProcessor(SCREAMING_SNAKE_CASE__)
        processor.save_pretrained(self.tmpdirname)
    def lowerCAmelCase ( self : int ,**SCREAMING_SNAKE_CASE__ : Union[str, Any]):
        # Reload the image processor saved by the fixture above.
        return AutoProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__).image_processor
    def lowerCAmelCase ( self : Union[str, Any]):
        # tearDown-style fixture: remove the temp dir.
        shutil.rmtree(self.tmpdirname)
    def lowerCAmelCase ( self : Dict):
        # Build one random RGB PIL image (H=30, W=400) as test input.
        __lowerCamelCase : Optional[int] = [np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)]
        __lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ ,0 ,-1)) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def lowerCAmelCase ( self : Optional[Any]):
        # Same binary mask post-processed through TF and PT must match exactly.
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : Optional[Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = np.random.randint(0 ,2 ,size=(1, 3, 5, 5)).astype(np.floataa)
        __lowerCamelCase : Tuple = [tf.convert_to_tensor(SCREAMING_SNAKE_CASE__)]
        __lowerCamelCase : Any = [torch.tensor(SCREAMING_SNAKE_CASE__)]
        __lowerCamelCase : Tuple = [[1_7_6_4, 2_6_4_6]]
        __lowerCamelCase : int = [[6_8_3, 1_0_2_4]]
        __lowerCamelCase : Any = processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
        __lowerCamelCase : Optional[Any] = processor.post_process_masks(
            SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))
    @is_pt_tf_cross_test
    def lowerCAmelCase ( self : int):
        # pixel_values must agree across: bare vs wrapped processor, PT vs TF.
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : Any = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = self.prepare_image_inputs()
        __lowerCamelCase : str = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='pt')['pixel_values'].numpy()
        __lowerCamelCase : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')['pixel_values'].numpy()
        __lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='tf')['pixel_values'].numpy()
        __lowerCamelCase : Optional[Any] = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf')['pixel_values'].numpy()
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
| 652 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Scheduler step output: the updated sample plus the noise-free mean of
    that sample (SdeVeOutput-style, cf. the `SdeVeOutput(...)` construction in
    the scheduler below).

    NOTE(review): the base-class reference `SCREAMING_SNAKE_CASE` is an
    undefined placeholder (upstream this derives from BaseOutput), and both
    fields share the single collapsed name `_UpperCAmelCase`, so the second
    annotation overrides the first — restore `prev_sample` /
    `prev_sample_mean` before use.
    """
    # Updated (previous-timestep) sample.
    _UpperCAmelCase : torch.FloatTensor
    # Mean of the updated sample before diffusion noise is added.
    _UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Variance-exploding (VE) SDE scheduler in the style of diffusers'
    ScoreSdeVeScheduler (after score_sde_pytorch): a predictor step
    (`step_pred`-like) driven by discrete sigmas plus a Langevin corrector
    step (`step_correct`-like).

    NOTE(review): this chunk is machine-rewritten. Base classes are undefined
    placeholders, every `def` declares several parameters all named
    `SCREAMING_SNAKE_CASE__` (a SyntaxError as written), methods share the
    name `lowerCAmelCase`, and locals collapse to `__lowerCamelCase` while
    later lines read the intended names (`sample`, `timesteps`, `sigma`,
    `drift`, `diffusion`, ...). Treat the comments below as reconstruction
    hints, not guarantees.
    """
    # Solver order (upstream: `order = 1`).
    _UpperCAmelCase : Dict = 1
    @register_to_config
    def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int = 2_0_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.15 ,SCREAMING_SNAKE_CASE__ : float = 0.01 ,SCREAMING_SNAKE_CASE__ : float = 1348.0 ,SCREAMING_SNAKE_CASE__ : float = 1E-5 ,SCREAMING_SNAKE_CASE__ : int = 1 ,):
        # Apparent config: (num_train_timesteps, snr, sigma_min, sigma_max,
        # sampling_eps, correct_steps) — TODO confirm against upstream.
        # standard deviation of the initial noise distribution
        __lowerCamelCase : int = sigma_max
        # setable values
        __lowerCamelCase : List[str] = None
        self.set_sigmas(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[int] = None):
        # scale_model_input: the VE SDE needs no input scaling — identity.
        return sample
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None):
        # set_timesteps: continuous timesteps from 1 down to sampling_eps.
        __lowerCamelCase : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        __lowerCamelCase : Optional[int] = torch.linspace(1 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None):
        # set_sigmas: geometric sigma schedule between sigma_min and sigma_max,
        # plus per-timestep discrete sigmas.
        __lowerCamelCase : Optional[int] = sigma_min if sigma_min is not None else self.config.sigma_min
        __lowerCamelCase : Optional[int] = sigma_max if sigma_max is not None else self.config.sigma_max
        __lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        __lowerCamelCase : Optional[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE__) ,math.log(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str]):
        # get_adjacent_sigma: sigma of the previous timestep (zeros at t == 0).
        return torch.where(
            timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        # step_pred: one reverse-SDE predictor step (eq. 6 / 47 of the
        # score-SDE paper, per the inline comments below).
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        __lowerCamelCase : List[str] = timestep * torch.ones(
            sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
        __lowerCamelCase : str = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        __lowerCamelCase : Dict = timesteps.to(self.discrete_sigmas.device)
        __lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device)
        __lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).to(sample.device)
        __lowerCamelCase : int = torch.zeros_like(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        __lowerCamelCase : int = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            __lowerCamelCase : List[Any] = diffusion.unsqueeze(-1)
        __lowerCamelCase : Any = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        __lowerCamelCase : int = randn_tensor(
            sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__ ,device=sample.device ,dtype=sample.dtype)
        __lowerCamelCase : Optional[int] = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        __lowerCamelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,prev_sample_mean=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        # step_correct: one Langevin corrector step; step size derived from
        # the signal-to-noise ratio of model output vs. fresh noise.
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        __lowerCamelCase : Optional[int] = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        __lowerCamelCase : str = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean()
        __lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean()
        __lowerCamelCase : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        __lowerCamelCase : Optional[int] = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        __lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            __lowerCamelCase : List[str] = step_size.unsqueeze(-1)
        __lowerCamelCase : str = sample + step_size * model_output
        __lowerCamelCase : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,):
        # add_noise: forward-process noising at the given timesteps.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        __lowerCamelCase : int = timesteps.to(original_samples.device)
        __lowerCamelCase : Any = self.discrete_sigmas.to(original_samples.device)[timesteps]
        __lowerCamelCase : Optional[Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(SCREAMING_SNAKE_CASE__) * sigmas[:, None, None, None]
        )
        __lowerCamelCase : str = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int]):
        # Scheduler length == number of training timesteps.
        return self.config.num_train_timesteps
| 652 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
    """Convert an Excel-style column title (e.g. ``'A'``, ``'AB'``, ``'ZZ'``)
    to its 1-based column number.

    The title is read as a base-26 numeral with digits A=1 .. Z=26.

    Fixes in this revision: the chunk's local names had been collapsed to a
    single identifier (`answer`/`index`/`power` were never bound) and the
    exponent passed to ``pow`` was the *string argument* instead of the digit
    position, a guaranteed TypeError.

    NOTE(review): input validation via ``assert`` is kept for interface
    compatibility (callers would see AssertionError), although it is stripped
    under ``python -O``.
    """
    assert lowerCamelCase__.isupper()
    answer = 0
    power = 0
    # Walk the title right-to-left, adding digit * 26**position.
    for index in range(len(lowerCamelCase__) - 1, -1, -1):
        value = (ord(lowerCamelCase__[index]) - 6_4) * pow(2_6, power)
        answer += value
        power += 1
    return answer
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    from doctest import testmod
    testmod()
| 652 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Configure root logging once at import time so the analysis helpers below
# can report progress via logging.info(...).
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def SCREAMING_SNAKE_CASE__ ( input_array ) -> np.ndarray:
    """Reshape a 1-D array into a single column of shape ``(size, 1)``.

    Fix in this revision: the body referenced ``input_array`` while the
    parameter had been renamed to a placeholder, which raised NameError;
    the parameter name is restored (calls remain positionally compatible).
    """
    return input_array.reshape((input_array.size, 1))
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Within-class covariance matrix (S_w) for LDA.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class label, shape (n_samples,).
        classes: number of distinct classes (labels assumed 0..classes-1).

    Returns:
        (n_features, n_features) matrix: sum over classes of the covariance
        of class-centered data, divided by the total number of samples.

    Fix in this revision: the def line declared three parameters with the
    same placeholder name (a SyntaxError) and the centering step subtracted
    the wrong operand; positional call compatibility is preserved.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i (column_reshape inlined).
        centered_data = data - data_mean.reshape((data_mean.size, 1))
        if i > 0:
            # covariance_sum already holds a matrix from the first iteration
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First loop: replace the np.nan sentinel with the first term
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Between-class covariance matrix (S_b) for LDA.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class label, shape (n_samples,).
        classes: number of distinct classes (labels assumed 0..classes-1).

    Returns:
        (n_features, n_features) matrix: sum over classes of
        n_i * (mu_i - mu)(mu_i - mu)^T, divided by the total sample count.

    Fix in this revision: the def line declared three parameters with the
    same placeholder name (a SyntaxError) and the mean-difference outer
    product lost its operands; positional call compatibility is preserved.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        # (mu_i - mu) as a column vector (column_reshape inlined).
        mean_diff = data_mean.reshape((data_mean.size, 1)) - general_data_mean.reshape(
            (general_data_mean.size, 1)
        )
        if i > 0:
            covariance_sum += device_data * np.dot(mean_diff, mean_diff.T)
        else:
            # First loop: replace the np.nan sentinel with the first term
            covariance_sum = device_data * np.dot(mean_diff, mean_diff.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , dimensions ) -> np.ndarray:
    """Principal Component Analysis: project `features` onto the top
    `dimensions` principal components.

    Args:
        features: array of shape (n_features, n_samples).
        dimensions: number of principal components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if `features` is empty/all-zero (kept for interface
        compatibility with the test helpers in this module).

    Fix in this revision: the def line declared two parameters with the same
    placeholder name (a SyntaxError) and `force=` received a placeholder;
    positional call compatibility is preserved.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        # eigh returns eigenvalues in ascending order, eigenvectors as columns.
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Reverse the column order, then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the dataset on the new space
        projected_data = np.dot(filtered_eigenvectors.T, centered_data)
        logging.info('Principal Component Analysis computed')
        return projected_data
    logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
    logging.error('Dataset empty')
    raise AssertionError
def SCREAMING_SNAKE_CASE__ ( features , labels , classes , dimensions ) -> np.ndarray:
    """Linear Discriminant Analysis: project `features` onto `dimensions`
    discriminant directions via the generalized eigenproblem S_b v = lambda S_w v.

    Relies on the sibling helpers `covariance_between_classes` and
    `covariance_within_classes` defined in this module.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class label, shape (n_samples,).
        classes: number of distinct classes.
        dimensions: number of discriminants to keep (must be < classes).

    Raises:
        AssertionError: if `dimensions >= classes` or the dataset is empty
        (kept as `assert`/`raise AssertionError` because the test helpers in
        this module explicitly check for AssertionError).

    Fixes in this revision: the def line declared four parameters with the
    same placeholder name (a SyntaxError), and the empty-dataset check tested
    `features.any` — a bound method, always truthy — so that branch was
    unreachable; it now calls `features.any()`.
    """
    # LDA yields at most `classes - 1` meaningful directions.
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        # eigh returns ascending eigenvalues: reverse, keep `dimensions` columns.
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions via SVD before projecting.
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
    logging.error('Dataset empty')
    raise AssertionError
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    # Test: linear_discriminant_analysis must raise AssertionError when
    # dimensions >= classes.
    # NOTE(review): machine-rewritten — the locals (features, labels, classes,
    # dimensions) all bind to `__lowerCamelCase` and are then read through the
    # undefined module-level name `lowerCamelCase__`, and this file defines no
    # symbol `linear_discriminant_analysis`; restore the original names before
    # running.
    # Create dummy dataset with 2 classes and 3 features
    __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] )
    __lowerCamelCase : Optional[Any] = 2
    __lowerCamelCase : Tuple = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(lowerCamelCase__ ) as error_info:
        __lowerCamelCase : int = linear_discriminant_analysis(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        if isinstance(lowerCamelCase__ , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    # Test: principal_component_analysis output is compared against a known
    # projection; a mismatch raises AssertionError, which pytest.raises
    # expects.
    # NOTE(review): machine-rewritten like the test above — locals collapse to
    # `__lowerCamelCase`, reads go through the undefined `lowerCamelCase__`,
    # and `principal_component_analysis` is not defined under that name here.
    __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    __lowerCamelCase : Dict = 2
    __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] )
    with pytest.raises(lowerCamelCase__ ) as error_info:
        __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ )
        if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run this module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 652 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
# Module-level logger (upstream name is presumably `logger`; collapsed to `a`
# by the rewrite — the __init__ below reads it as `logger`).
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class in the style of transformers' UperNetConfig
    (`model_type = 'upernet'`): wraps a backbone config plus UPerNet decode /
    auxiliary-head hyperparameters.

    NOTE(review): machine-rewritten. The base class `SCREAMING_SNAKE_CASE` is
    an undefined placeholder (upstream: PretrainedConfig), `__init__` declares
    eleven parameters all named `SCREAMING_SNAKE_CASE__` (a SyntaxError as
    written), and each config attribute is assigned to the single local
    `__lowerCamelCase` instead of `self.<name>` while the right-hand sides
    still carry the intended names (backbone_config, hidden_size, ...).
    """
    # model_type identifier used by the Auto* config machinery.
    _UpperCAmelCase : str = '''upernet'''
    def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any]=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : Tuple=0.02 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[1, 2, 3, 6] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : str=0.4 ,SCREAMING_SNAKE_CASE__ : str=3_8_4 ,SCREAMING_SNAKE_CASE__ : Tuple=2_5_6 ,SCREAMING_SNAKE_CASE__ : Dict=1 ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=2_5_5 ,**SCREAMING_SNAKE_CASE__ : Tuple ,):
        # NOTE(review): a mutable default ([1, 2, 3, 6]) is shared across
        # calls; upstream avoids this — confirm before restoring.
        super().__init__(**SCREAMING_SNAKE_CASE__)
        # Default to a ResNet backbone when none is supplied; accept either a
        # config object or a plain dict keyed by 'model_type'.
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
            __lowerCamelCase : int = CONFIG_MAPPING['resnet'](out_features=['stage1', 'stage2', 'stage3', 'stage4'])
        elif isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : List[Any] = backbone_config.get('model_type')
            __lowerCamelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type]
            __lowerCamelCase : List[Any] = config_class.from_dict(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = backbone_config
        __lowerCamelCase : List[Any] = hidden_size
        __lowerCamelCase : Optional[int] = initializer_range
        __lowerCamelCase : Dict = pool_scales
        __lowerCamelCase : Optional[int] = use_auxiliary_head
        __lowerCamelCase : Optional[Any] = auxiliary_loss_weight
        __lowerCamelCase : Any = auxiliary_in_channels
        __lowerCamelCase : Tuple = auxiliary_channels
        __lowerCamelCase : Optional[int] = auxiliary_num_convs
        __lowerCamelCase : str = auxiliary_concat_input
        __lowerCamelCase : int = loss_ignore_index
    def lowerCAmelCase ( self : List[Any]):
        # to_dict-style serialization: deep-copy the attribute dict, inline the
        # backbone config, and record the model type.
        __lowerCamelCase : str = copy.deepcopy(self.__dict__)
        __lowerCamelCase : Optional[Any] = self.backbone_config.to_dict()
        __lowerCamelCase : List[Any] = self.__class__.model_type
        return output
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Root logger used by the doc-test analyzer below (upstream name presumably
# `logger`; collapsed to `a` — the class body reads it as `logger`).
a =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    """Doc-test runner over the transformers source tree: scans a directory,
    filters files by (non-)identifier substrings, and runs their doctests
    either as imported modules or as doctest files.

    NOTE(review): machine-rewritten. All methods share the name
    `lowerCAmelCase` (only the last survives on the class, and the helper the
    others call is `self.analyze_directory`, which is never defined under that
    name), the first method declares several parameters all named
    `SCREAMING_SNAKE_CASE__` (a SyntaxError as written), and locals collapse
    to `__lowerCamelCase` while later lines read the intended names
    (`files`, `identifier`, `only_modules`, `result`, ...).
    """
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Path ,SCREAMING_SNAKE_CASE__ : Union[str, None] = None ,SCREAMING_SNAKE_CASE__ : Union[List[str], None] = None ,SCREAMING_SNAKE_CASE__ : Union[str, List[str], None] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        # analyze_directory(directory, identifier, ignore_files, n_identifier,
        # only_modules): filter the directory's files, then doctest each one.
        __lowerCamelCase : List[str] = [file for file in os.listdir(SCREAMING_SNAKE_CASE__) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))]
        if identifier is not None:
            __lowerCamelCase : str = [file for file in files if identifier in file]
        if n_identifier is not None:
            # n_identifier: substring(s) that must NOT appear in kept names.
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                for n_ in n_identifier:
                    __lowerCamelCase : Optional[int] = [file for file in files if n_ not in file]
            else:
                __lowerCamelCase : Dict = [file for file in files if n_identifier not in file]
        __lowerCamelCase : str = ignore_files or []
        ignore_files.append('__init__.py')
        __lowerCamelCase : Tuple = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,SCREAMING_SNAKE_CASE__)
            if only_modules:
                # Import the file as a transformers submodule and run its
                # docstring examples as a DocTestSuite.
                __lowerCamelCase : Optional[int] = file.split('.')[0]
                try:
                    __lowerCamelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : List[str] = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__)
                    self.assertIs(len(result.failures) ,0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                # Treat the file as a doctest text file (e.g. docs sources).
                __lowerCamelCase : int = doctest.testfile(str('..' / directory / file) ,optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed ,0)
    def lowerCAmelCase ( self : List[Any]):
        # Doc-test modeling_* files (CTRL files excluded).
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = 'modeling'
        __lowerCamelCase : Dict = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict):
        # Doc-test tokenization_* files.
        __lowerCamelCase : Tuple = Path('src/transformers')
        __lowerCamelCase : Optional[int] = 'tokenization'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Tuple):
        # Doc-test configuration_* files.
        __lowerCamelCase : List[Any] = Path('src/transformers')
        __lowerCamelCase : str = 'configuration'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : int):
        # Doc-test everything EXCEPT configuration/modeling/tokenization files.
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,n_identifier=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : int):
        # Doc-test the documentation sources as doctest files (not modules).
        __lowerCamelCase : List[Any] = Path('docs/source')
        __lowerCamelCase : str = ['favicon.ico']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ ,only_modules=SCREAMING_SNAKE_CASE__)
| 652 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
# Module-level logger for the DistilBERT fast tokenizer module.
# NOTE(review): the rewrite bound this and the four constants below to the
# single name `a`, so each assignment overwrites the previous one. Upstream
# these are `logger`, VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP,
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and PRETRAINED_INIT_CONFIGURATION
# (the class below reads them under those names) — confirm before restoring.
a =logging.get_logger(__name__)
# Local file names expected for a saved tokenizer (VOCAB_FILES_NAMES).
a ={"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
# Checkpoint name -> remote vocab/tokenizer URLs (PRETRAINED_VOCAB_FILES_MAP).
a ={
    """vocab_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
        ),
        """distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
        """distilbert-base-uncased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
        """distilbert-base-cased-distilled-squad""": (
            """https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-german-cased""": (
            """https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
        ),
        """distilbert-base-multilingual-cased""": (
            """https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
        ),
    },
}
# Checkpoint name -> max model input length in tokens
# (PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES).
a ={
    """distilbert-base-uncased""": 512,
    """distilbert-base-uncased-distilled-squad""": 512,
    """distilbert-base-cased""": 512,
    """distilbert-base-cased-distilled-squad""": 512,
    """distilbert-base-german-cased""": 512,
    """distilbert-base-multilingual-cased""": 512,
}
# Checkpoint name -> tokenizer init overrides (PRETRAINED_INIT_CONFIGURATION).
a ={
    """distilbert-base-uncased""": {"""do_lower_case""": True},
    """distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
    """distilbert-base-cased""": {"""do_lower_case""": False},
    """distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
    """distilbert-base-german-cased""": {"""do_lower_case""": False},
    """distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Fast (Rust-backed) DistilBERT tokenizer.

    NOTE(review): in the original, every class attribute was bound to the same
    mangled name ``_UpperCAmelCase`` (each assignment overwrote the previous one)
    and ``__init__`` declared several parameters under one duplicated name — a
    SyntaxError.  Names below are restored to the fast-tokenizer contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Keep the Rust-side normalizer options in sync with the Python arguments.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            # NOTE(review): `normalizers` must come from the `tokenizers` package —
            # confirm the import exists at the top of this file (outside this excerpt).
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] — and, when a second sequence is given, + B [SEP]."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """BERT-style segment ids: 0 for the first sequence, 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Save the backend tokenizer's model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 652 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file.
a =logging.get_logger(__name__)
# SentencePiece word-initial marker character (U+2581).
a ="""▁"""
# NOTE(review): the constants below are all bound to the same mangled name `a`,
# so each assignment overwrites the previous one; downstream code reads them as
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
a ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
a ={
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}
# Maximum model input length (tokens) for the released checkpoint.
a ={"""vinai/bartpho-syllable""": 1024}
class A_ ( SCREAMING_SNAKE_CASE ):
    """BARTpho-syllable tokenizer: SentencePiece model plus a reduced fairseq vocab.

    NOTE(review): in the original every method was named ``lowerCAmelCase`` (each
    definition overwrote the previous one), ``__init__`` had duplicate parameter
    names (a SyntaxError), and locals/attributes were rebound to the mangled name
    ``__lowerCamelCase`` while later lines read the real names.  Names below are
    restored to the slow-tokenizer contract; the algorithm is unchanged.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs=None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab.
        # Keep order of special tokens for backward compatibility.
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                # One token per line; ids continue after the special tokens.
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # SentencePiece processors are not picklable: ship the serialized proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """<s> A </s> — pair: <s> A </s></s> B </s>."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """All-zero token-type ids (token types are not used by this model)."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        # Tokens outside the reduced fairseq vocab map to <unk>.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        """Join pieces and turn the SentencePiece underline back into spaces."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write (or copy) the SentencePiece model and the monolingual vocab file."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # No file on disk (e.g. loaded from a serialized proto): dump the model bytes.
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 652 | 1 |
import argparse
import os
import re
import packaging.version
a ="""examples/"""
a ={
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
a ={
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
a ="""README.md"""
def update_version_in_file(fname, version, pattern):
    """Rewrite the version string located by REPLACE_PATTERNS[pattern] in `fname`.

    NOTE(review): the original declared three parameters under one duplicated
    name (a SyntaxError) and bound locals to the mangled name ``__lowerCamelCase``
    while later lines read ``replace``; names are restored here.
    """
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the placeholder, then rewrite the file with the new version.
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Pin `version` in every maintained example script under PATH_TO_EXAMPLES."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk.
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version everywhere it is pinned; skip examples for patch releases."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to the `main` docs with stable-docs links in the README model list.

    NOTE(review): the two prompt strings were bound to the mangled name
    ``__lowerCamelCase`` while the loops read ``_start_prompt``/``_end_prompt``
    (a NameError); the bindings are restored here.
    """
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version out of the package `__init__.py`."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Compute the release version, confirm it interactively, and apply it everywhere."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"
    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Bump to the next `.dev0` version after a release, confirming interactively."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version
    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to the mangled
    # name `a` while reading `parser`/`args` (a NameError); bindings restored.
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds small DeiT configs/inputs for the TF model tests.

    NOTE(review): the original class was renamed to ``A_`` (while the test class
    below instantiates ``TFDeiTModelTester``), all methods were named
    ``lowerCAmelCase`` (overwriting each other), and the ``self.x = x``
    assignments were mangled away; everything is restored here.  The parameter
    named ``num_labels`` is inferred from the upstream tester — confirm.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common-API tests for the TF DeiT models.

    NOTE(review): class attributes and methods were mangled (`_UpperCAmelCase` /
    `lowerCAmelCase` everywhere, later bindings overwriting earlier ones); the
    names the ModelTesterMixin/unittest machinery requires are restored.  The
    four boolean flags are restored per the upstream TF DeiT test — confirm.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            # DeiT exposes a keras layer as input embedding; output embeddings are optional.
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Models whose call() does not accept labels (e.g. the teacher variant) must not receive them.
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    NOTE(review): the original bound the image to the mangled name
    ``__lowerCamelCase`` and returned the undefined ``image``; also the function
    itself was renamed while the test calls ``prepare_img()`` — both restored.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
    """Slow integration test: run the distilled DeiT checkpoint on a real image.

    NOTE(review): the property and method names were mangled (`lowerCAmelCase`);
    restored so `self.default_image_processor` (read in the test body) resolves.
    """

    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 652 | 1 |
import qiskit
def single_qubit_measure(qubits, classical_bits):
    """Build a circuit with `qubits`/`classical_bits`, measure qubit 0, and
    return the measurement histogram from 1000 simulator shots.

    NOTE(review): the original declared two parameters under one duplicated name
    (a SyntaxError), bound locals to the mangled ``__lowerCamelCase``, and was
    renamed away from ``single_qubit_measure`` which the __main__ guard calls —
    all restored.
    """
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register.
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits.
    circuit.measure([0], [0])
    # Execute the circuit on the simulator.
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 652 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output of the semantic Stable Diffusion pipeline.

    NOTE(review): both fields were bound to the same mangled name
    ``_UpperCAmelCase`` (the second annotation silently replaced the first);
    field names restored per the upstream pipeline output — confirm.
    """

    # Generated images, as PIL images or a numpy array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image flag set when the safety checker detected NSFW content (None if unchecked).
    nsfw_content_detected: Optional[List[bool]]
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    """Doctest runner over the transformers source/doc trees (currently skipped).

    NOTE(review): every method is named ``lowerCAmelCase`` (later definitions
    overwrite earlier ones), the first method declares duplicate parameter names,
    and many locals are bound to ``__lowerCamelCase`` while later lines read
    ``files``/``ignore_files``/``result``/``module_identifier`` — the bindings
    appear mangled.  Bytes are left untouched; only comments were added.
    """
    # Collects files under a directory, filters them by (n_)identifier/ignore list,
    # then runs each either as a module DocTestSuite or as a doctest file.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Path ,SCREAMING_SNAKE_CASE__ : Union[str, None] = None ,SCREAMING_SNAKE_CASE__ : Union[List[str], None] = None ,SCREAMING_SNAKE_CASE__ : Union[str, List[str], None] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        __lowerCamelCase : List[str] = [file for file in os.listdir(SCREAMING_SNAKE_CASE__) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))]
        if identifier is not None:
            __lowerCamelCase : str = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                for n_ in n_identifier:
                    __lowerCamelCase : Optional[int] = [file for file in files if n_ not in file]
            else:
                __lowerCamelCase : Dict = [file for file in files if n_identifier not in file]
        __lowerCamelCase : str = ignore_files or []
        ignore_files.append('__init__.py')
        __lowerCamelCase : Tuple = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,SCREAMING_SNAKE_CASE__)
            if only_modules:
                __lowerCamelCase : Optional[int] = file.split('.')[0]
                try:
                    __lowerCamelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : List[str] = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__)
                    self.assertIs(len(result.failures) ,0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                __lowerCamelCase : int = doctest.testfile(str('..' / directory / file) ,optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed ,0)
    # Doctests for modeling files (the ctrl variants are excluded).
    def lowerCAmelCase ( self : List[Any]):
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = 'modeling'
        __lowerCamelCase : Dict = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__)
    # Doctests for tokenization files.
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : Tuple = Path('src/transformers')
        __lowerCamelCase : Optional[int] = 'tokenization'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)
    # Doctests for configuration files.
    def lowerCAmelCase ( self : Tuple):
        __lowerCamelCase : List[Any] = Path('src/transformers')
        __lowerCamelCase : str = 'configuration'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)
    # Everything except configuration/modeling/tokenization files.
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,n_identifier=SCREAMING_SNAKE_CASE__)
    # Doctests over the documentation sources (favicon excluded).
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : List[Any] = Path('docs/source')
        __lowerCamelCase : str = ['favicon.ico']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ ,only_modules=SCREAMING_SNAKE_CASE__)
| 652 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit the deprecation warning at import time so downstream users migrate their imports.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration for a timm backbone wrapped as a transformers backbone.

    NOTE(review): the original declared five parameters under one duplicated
    name (a SyntaxError) and mangled away the ``self.x = x`` assignments;
    names restored per the ``timm_backbone`` config contract — confirm.
    """

    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        # This config always describes a timm backbone.
        self.use_timm_backbone = True
        # Default to the last feature map when no indices are requested.
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 652 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module-level logger for this tokenizer file.
a =logging.get_logger(__name__)
# NOTE(review): the constants below are all bound to the same mangled name `a`
# (each assignment overwrites the previous); upstream they are
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES.
a ={"""vocab_file""": """vocab.txt"""}
a ={
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
# Maximum model input length (tokens) for the released checkpoint.
a ={
    """openbmb/cpm-ant-10b""": 1024,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> collections.OrderedDict:
    """Load a newline-delimited vocabulary file into an OrderedDict.

    Args:
        lowerCamelCase__: path to a UTF-8 text file with one token per line.

    Returns:
        OrderedDict mapping token -> line index (the wrong ``-> Tuple``
        annotation of the original is corrected here).
    """
    vocab = collections.OrderedDict()
    with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader:
        # Stream the file instead of materializing every line via readlines().
        for index, line in enumerate(reader):
            # Strip only the trailing newline so meaningful spaces survive.
            vocab[line.rstrip('\n')] = index
    return vocab
class A_ :
    """Greedy longest-match-first WordPiece tokenizer over a fixed vocabulary.

    NOTE(review): reconstructed from mangled code whose ``__init__`` declared
    its three parameters under a single duplicated name (a SyntaxError) and
    never stored them on the instance, and whose matched piece was appended
    under the wrong name. The base class reference ``SCREAMING_SNAKE_CASE``
    was undefined, so this is now a plain class.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab  # token -> id mapping; only membership is used
        self.unk_token = unk_token  # emitted for unmatchable spans
        self.max_input_chars_per_word = max_input_chars_per_word

    def lowerCAmelCase(self, token):
        """Split ``token`` into the longest vocabulary pieces, left to right."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # Overlong inputs map wholesale to the unknown token.
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No prefix matched: emit <unk> and advance one character.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class A_ ( SCREAMING_SNAKE_CASE ):
    """Tokenizer for CPM-Ant: jieba pre-segmentation followed by WordPiece.

    NOTE(review): this block is heavily machine-mangled -- ``__init__`` and
    several methods declare one parameter name many times (a SyntaxError),
    every local is bound to ``__lowerCamelCase`` while later lines read
    names such as ``load_vocab``, ``token_ids``, ``vocab_file``, ``index``
    and ``save_directory`` that are never assigned, and the sort lambdas
    name their parameter ``SCREAMING_SNAKE_CASE__`` but read ``x``. Only
    comments are added here; the code is left byte-identical.
    """
    # Class-level tokenizer metadata (vocab file names / pretrained maps).
    _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
    _UpperCAmelCase : Optional[int] = False
    def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
        # jieba is an optional dependency; fail early with a clear message.
        requires_backends(self ,['jieba'])
        super().__init__(
            bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Optional[Any] = bod_token
        __lowerCamelCase : Dict = eod_token
        # NOTE(review): `load_vocab` here is undefined -- the loader above is
        # named SCREAMING_SNAKE_CASE__ in this mangled file.
        __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = self.encoder[space_token]
        __lowerCamelCase : Dict = self.encoder[line_token]
        # Space/newline get dedicated ids; remove their literal entries.
        del self.encoder[space_token]
        del self.encoder[line_token]
        __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
        __lowerCamelCase : int = {v: k for k, v in self.encoder.items()}
        __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token)
    @property
    # id of the beginning-of-document token
    def lowerCAmelCase ( self : List[Any]):
        return self.encoder[self.bod_token]
    @property
    # id of the end-of-document token
    def lowerCAmelCase ( self : Tuple):
        return self.encoder[self.eod_token]
    @property
    # id of the newline token
    def lowerCAmelCase ( self : Union[str, Any]):
        return self.encoder["\n"]
    @property
    # vocabulary size
    def lowerCAmelCase ( self : str):
        return len(self.encoder)
    def lowerCAmelCase ( self : str):
        return dict(self.encoder ,**self.added_tokens_encoder)
    # Tokenize: jieba word segmentation, then WordPiece on each word.
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        __lowerCamelCase : Any = []
        for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__))
        return output_tokens
    # Decode: drop negative ids and special tokens before delegating upward.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]):
        __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0]
        __lowerCamelCase : str = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]):
        return token in self.encoder
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
        return "".join(SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token))
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token)
    # Save the (sorted) vocabulary to <dir>/vocab.txt, warning when the
    # stored indices are not consecutive.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
        if os.path.isdir(SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : Any = os.path.join(
                SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
        __lowerCamelCase : Any = 0
        # Re-insert the space/newline entries removed in __init__ before dumping.
        if " " in self.encoder:
            __lowerCamelCase : Any = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            __lowerCamelCase : str = self.encoder['\n']
            del self.encoder["\n"]
        __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
        with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    __lowerCamelCase : Any = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)
    # Prepend BOS to each sequence (and join a pair with a second BOS).
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None):
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
    # 1 marks special tokens, 0 marks sequence tokens.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
        if token_ids_a is not None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
| 652 | 1 |
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic torch/cuDNN kernels so pixel outputs are reproducible.
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast, CPU-friendly tests for DDIMPipeline built from a tiny UNet.

    NOTE(review): the bodies below bind every intermediate to the single
    name ``__lowerCamelCase`` while later statements read ``unet``,
    ``scheduler``, ``components``, ``generator``, ``inputs``, ``pipe``,
    ``image`` and ``image_slice``, which are never assigned, and one
    ``def`` header repeats a parameter name (a SyntaxError). This looks
    like mechanically mangled code; only comments are added here.
    """
    _UpperCAmelCase : Optional[Any] = DDIMPipeline
    _UpperCAmelCase : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    _UpperCAmelCase : Tuple = PipelineTesterMixin.required_optional_params - {
        '''num_images_per_prompt''',
        '''latents''',
        '''callback''',
        '''callback_steps''',
    }
    _UpperCAmelCase : str = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    _UpperCAmelCase : List[Any] = False
    # Build the minimal pipeline components: a tiny UNet + DDIM scheduler.
    def lowerCAmelCase ( self : Any):
        torch.manual_seed(0)
        __lowerCamelCase : Optional[int] = UNetaDModel(
            block_out_channels=(3_2, 6_4) ,layers_per_block=2 ,sample_size=3_2 ,in_channels=3 ,out_channels=3 ,down_block_types=('DownBlock2D', 'AttnDownBlock2D') ,up_block_types=('AttnUpBlock2D', 'UpBlock2D') ,)
        __lowerCamelCase : Union[str, Any] = DDIMScheduler()
        __lowerCamelCase : Union[str, Any] = {'unet': unet, 'scheduler': scheduler}
        return components
    # Deterministic generator + standard call kwargs for one tiny image.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict=0):
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            # MPS does not support device-bound generators.
            __lowerCamelCase : Any = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = {
            'batch_size': 1,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs
    # End-to-end smoke test: two inference steps, compare a 3x3 corner slice.
    def lowerCAmelCase ( self : Optional[int]):
        __lowerCamelCase : str = 'cpu'
        __lowerCamelCase : Union[str, Any] = self.get_dummy_components()
        __lowerCamelCase : str = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = pipe(**SCREAMING_SNAKE_CASE__).images
        __lowerCamelCase : Optional[int] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape ,(1, 3_2, 3_2, 3))
        __lowerCamelCase : Optional[Any] = np.array(
            [1.0_00E00, 5.7_17E-01, 4.7_17E-01, 1.0_00E00, 0.0_00E00, 1.0_00E00, 3.0_00E-04, 0.0_00E00, 9.0_00E-04])
        __lowerCamelCase : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(SCREAMING_SNAKE_CASE__ ,1E-3)
    # Thin wrappers tightening the mixin's default tolerances to 3e-3.
    def lowerCAmelCase ( self : Dict):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3)
    def lowerCAmelCase ( self : Optional[Any]):
        super().test_save_load_local(expected_max_difference=3E-3)
    def lowerCAmelCase ( self : Any):
        super().test_save_load_optional_components(expected_max_difference=3E-3)
    def lowerCAmelCase ( self : Union[str, Any]):
        super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """GPU integration tests for DDIM against published DDPM checkpoints.

    NOTE(review): same mangling pattern as the fast tests above --
    intermediates go to ``__lowerCamelCase`` while later lines read
    ``unet``, ``scheduler``, ``ddim``/``ddpm``, ``image`` and
    ``image_slice``, which are never assigned. Comments only.
    """
    # CIFAR-10 checkpoint, fixed seed, compares a 3x3 corner slice.
    def lowerCAmelCase ( self : Optional[int]):
        __lowerCamelCase : int = 'google/ddpm-cifar10-32'
        __lowerCamelCase : List[str] = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = DDIMScheduler()
        __lowerCamelCase : Optional[int] = DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
        ddim.to(SCREAMING_SNAKE_CASE__)
        ddim.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.manual_seed(0)
        __lowerCamelCase : Optional[int] = ddim(generator=SCREAMING_SNAKE_CASE__ ,eta=0.0 ,output_type='numpy').images
        __lowerCamelCase : int = image[0, -3:, -3:, -1]
        assert image.shape == (1, 3_2, 3_2, 3)
        __lowerCamelCase : Union[str, Any] = np.array([0.1723, 0.1617, 0.1600, 0.1626, 0.1497, 0.1513, 0.1505, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
    # Bedroom-256 checkpoint with its own saved scheduler config.
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : List[Any] = 'google/ddpm-ema-bedroom-256'
        __lowerCamelCase : Tuple = UNetaDModel.from_pretrained(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = DDIMScheduler.from_pretrained(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = DDIMPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__)
        ddpm.to(SCREAMING_SNAKE_CASE__)
        ddpm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = ddpm(generator=SCREAMING_SNAKE_CASE__ ,output_type='numpy').images
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        assert image.shape == (1, 2_5_6, 2_5_6, 3)
        __lowerCamelCase : List[str] = np.array([0.0060, 0.0201, 0.0344, 0.0024, 0.0018, 0.0002, 0.0022, 0.0000, 0.0069])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
| 652 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Submodule -> public symbols; consumed by _LazyModule below. The original
# bound this dict to the throwaway name `a`, leaving `_import_structure`
# (read at the bottom of the file) undefined.
_import_structure = {"""configuration_mmbt""": ["""MMBTConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: simply do not expose the torch-backed classes.
    pass
else:
    _import_structure["""modeling_mmbt"""] = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first
    # attribute access (the original assigned the proxy to `a`, which -- with
    # the `import sys` right above -- indicates the replacement was dropped
    # by mangling).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Configure root logging once at import time so the analyses below can log.
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape (size, 1).

    The original body read the undefined name ``input_array``; it now uses
    the actual parameter.
    """
    return lowerCamelCase__.reshape((lowerCamelCase__.size, 1))
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Average within-class scatter matrix of `features`.

    Args:
        features: (n_features, n_samples) data matrix, one sample per column.
        labels: 1-D array of class ids (0..classes-1), one per sample.
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) matrix: sum over classes of the scatter of
        the class-centered samples, divided by the total sample count.

    The original declared all three parameters under one duplicated name (a
    SyntaxError) and never bound ``covariance_sum``, which it then read.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        # Centralize the data of class i (column-wise mean removal; this is
        # the column_reshape(mean) pattern written with keepdims).
        centered_data = data - data.mean(1, keepdims=True)
        if i > 0:
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First iteration: replace the np.nan sentinel.
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Average between-class scatter matrix of `features`.

    Args:
        features: (n_features, n_samples) data matrix, one sample per column.
        labels: 1-D array of class ids (0..classes-1), one per sample.
        classes: number of distinct classes.

    Returns:
        (n_features, n_features) matrix: sum over classes of
        n_i * (mean_i - mean)(mean_i - mean)^T, divided by n_samples.

    The original declared all three parameters under one duplicated name (a
    SyntaxError) and read ``covariance_sum`` / ``device_data`` without ever
    binding them.
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        # Column-vector difference between the class mean and the grand mean.
        mean_diff = data_mean.reshape(-1, 1) - general_data_mean.reshape(-1, 1)
        if i > 0:
            covariance_sum += device_data * np.dot(mean_diff, mean_diff.T)
        else:
            # First iteration: replace the np.nan sentinel.
            covariance_sum = device_data * np.dot(mean_diff, mean_diff.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , dimensions ) -> np.ndarray:
    """Project `features` onto its top `dimensions` principal components.

    Args:
        features: (n_features, n_samples) data matrix, one sample per column.
        dimensions: number of principal components to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if `features` contains no non-zero entry (treated as
        "dataset not loaded" by the original).

    The original declared both parameters under one duplicated name (a
    SyntaxError); the body below restores the names its right-hand sides
    already read.
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns eigenvalues in ascending order; reverse the columns and
        # take the first `dimensions` to get the largest components first.
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def SCREAMING_SNAKE_CASE__ ( features , labels , classes , dimensions ) -> np.ndarray:
    """Fisher LDA: project `features` onto `dimensions` discriminant axes.

    Args:
        features: (n_features, n_samples) data matrix, one sample per column.
        labels: 1-D array of class ids, one per sample.
        classes: number of distinct classes.
        dimensions: number of discriminant directions to keep.

    Returns:
        (dimensions, n_samples) projected data.

    Raises:
        AssertionError: if ``classes <= dimensions`` or `features` is all-zero.

    NOTE(review): relies on the sibling scatter-matrix helpers
    ``covariance_between_classes`` / ``covariance_within_classes`` exactly as
    the original intended; in this mangled file those helpers are currently
    named ``SCREAMING_SNAKE_CASE__``, so the calls are unresolved until the
    file is de-mangled. The original also wrote ``features.any`` (always
    truthy) instead of calling it, and declared all four parameters under one
    duplicated name (a SyntaxError).
    """
    # At most classes - 1 discriminant directions are meaningful.
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        # Generalized eigenproblem S_b v = lambda S_w v via scipy.linalg.eigh.
        eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions through an SVD.
        svd_matrix, _singular_values, _vh = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Pytest case: LDA must raise AssertionError when dimensions >= classes.

    The original called ``linear_discriminant_analysis`` with the undefined
    argument names ``lowerCamelCase__``; the locals below restore them.
    """
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            # Reaching this point means no exception fired inside the helper.
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Pytest case: PCA output differing from the expectation must raise.

    The original called ``principal_component_analysis`` with the undefined
    argument names ``lowerCamelCase__``; the locals below restore them.
    """
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            # Mismatch is surfaced as the AssertionError the context expects.
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run any doctests in this module when executed as a script.
    import doctest
    doctest.testmod()
| 652 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
    """Unit tests for UnCLIPScheduler configuration and stepping.

    NOTE(review): same mangling pattern as the rest of this file set --
    locals are bound to ``__lowerCamelCase`` while later lines read
    ``config``, ``scheduler_class``, ``scheduler``, ``model``, ``sample``,
    ``generator``, ``residual``, ``pred_prev_sample``, ``result_sum``,
    ``result_mean`` and ``timesteps``, none of which are ever assigned.
    Only comments are added here.
    """
    _UpperCAmelCase : int = (UnCLIPScheduler,)
    # Default scheduler config; keyword overrides are merged in.
    def lowerCAmelCase ( self : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
        __lowerCamelCase : Any = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**SCREAMING_SNAKE_CASE__)
        return config
    # Sweep each config knob through its legal values.
    def lowerCAmelCase ( self : Optional[Any]):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Optional[Any]):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Union[str, Any]):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Tuple):
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[Any]):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__)
    # prev_timestep must precede time_step; skip invalid combinations.
    def lowerCAmelCase ( self : Dict):
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__)
    # Spot-check closed-form variances for the fixed_small_log schedule.
    def lowerCAmelCase ( self : Optional[int]):
        __lowerCamelCase : Optional[int] = self.scheduler_classes[0]
        __lowerCamelCase : Any = self.get_scheduler_config(variance_type='fixed_small_log')
        __lowerCamelCase : Dict = scheduler_class(**SCREAMING_SNAKE_CASE__)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5
    # Spot-check interpolated log-variances for the learned_range schedule.
    def lowerCAmelCase ( self : Any):
        __lowerCamelCase : Dict = self.scheduler_classes[0]
        __lowerCamelCase : List[str] = self.get_scheduler_config(variance_type='learned_range')
        __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = 0.5
        assert scheduler._get_variance(1 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -10.1712790 < 1E-5
        assert scheduler._get_variance(4_8_7 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -5.7998052 < 1E-5
        assert scheduler._get_variance(9_9_9 ,predicted_variance=SCREAMING_SNAKE_CASE__) - -0.0010011 < 1E-5
    # Full denoising loop over the default timesteps; checks summary stats.
    def lowerCAmelCase ( self : List[str]):
        __lowerCamelCase : str = self.scheduler_classes[0]
        __lowerCamelCase : str = self.get_scheduler_config()
        __lowerCamelCase : List[str] = scheduler_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = scheduler.timesteps
        __lowerCamelCase : Union[str, Any] = self.dummy_model()
        __lowerCamelCase : Optional[Any] = self.dummy_sample_deter
        __lowerCamelCase : List[str] = torch.manual_seed(0)
        for i, t in enumerate(SCREAMING_SNAKE_CASE__):
            # 1. predict noise residual
            __lowerCamelCase : int = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            # 2. predict previous mean of sample x_t-1
            __lowerCamelCase : Optional[int] = scheduler.step(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
            __lowerCamelCase : Optional[Any] = pred_prev_sample
        __lowerCamelCase : Union[str, Any] = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : Tuple = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3
    # Truncated 25-step schedule threading prev_timestep explicitly.
    def lowerCAmelCase ( self : str):
        __lowerCamelCase : str = self.scheduler_classes[0]
        __lowerCamelCase : List[Any] = self.get_scheduler_config()
        __lowerCamelCase : int = scheduler_class(**SCREAMING_SNAKE_CASE__)
        scheduler.set_timesteps(2_5)
        __lowerCamelCase : int = scheduler.timesteps
        __lowerCamelCase : Tuple = self.dummy_model()
        __lowerCamelCase : Any = self.dummy_sample_deter
        __lowerCamelCase : Any = torch.manual_seed(0)
        for i, t in enumerate(SCREAMING_SNAKE_CASE__):
            # 1. predict noise residual
            __lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            if i + 1 == timesteps.shape[0]:
                __lowerCamelCase : Optional[Any] = None
            else:
                __lowerCamelCase : Union[str, Any] = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            __lowerCamelCase : int = scheduler.step(
                SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,prev_timestep=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__).prev_sample
            __lowerCamelCase : Union[str, Any] = pred_prev_sample
        __lowerCamelCase : Tuple = torch.sum(torch.abs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__))
        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3
    # Interface stubs: these mixin hooks do not apply to UnCLIP.
    def lowerCAmelCase ( self : List[Any]):
        pass
    def lowerCAmelCase ( self : Union[str, Any]):
        pass
| 652 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( nth_term , power ) -> list[str]:
    """Return the first `nth_term` terms of the P-series as strings.

    The series is 1 + 1/2^p + 1/3^p + ... ; the first element is "1" and
    subsequent elements are rendered as "1 / k^p". An empty-string
    `nth_term` short-circuits to [""] (original behavior, kept).

    The original declared both parameters under one duplicated name (a
    SyntaxError) and read the undefined names the body below restores.
    """
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        # First term is "1"; later terms are 1 / (k ** p).
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
# NOTE(review): the statements below sit OUTSIDE the __main__ guard, so the
# interactive prompts run at import time; additionally the input() results
# are bound to `a` while the final print reads `nth_term` / `power` (and
# `p_series`), none of which are defined -- this will raise NameError.
a =int(input("""Enter the last number (nth term) of the P-Series"""))
a =int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 652 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module-level logger (the original binds it to the throwaway name `a`).
a =logging.get_logger(__name__)
# NOTE(review): this checkpoint->config-url map is also bound to `a`,
# immediately clobbering the logger above; code expecting `logger` or a
# named archive map will hit NameError.
a ={
    """caidas/swin2sr-classicalsr-x2-64""": (
        """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
    ),
}
class A_ ( PretrainedConfig ):
    """Configuration for the Swin2SR image super-resolution model.

    NOTE(review): reconstructed from mangled code whose ``__init__`` declared
    every parameter under one duplicated name (a SyntaxError) and bound each
    attribute value to the local ``__lowerCamelCase`` instead of ``self``;
    the parameter names are taken from the right-hand sides the original
    body already read. The undefined base ``SCREAMING_SNAKE_CASE`` is
    replaced by the imported ``PretrainedConfig`` — confirm upstream.
    """

    # Mangled aliases of the original `model_type` / `attribute_map`.
    _UpperCAmelCase : Optional[int] = '''swin2sr'''
    _UpperCAmelCase : Any = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # One transformer stage per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)


# Submodule -> public symbols; consumed by _LazyModule below. The original
# bound this dict (and every backend-specific addition) to the throwaway
# name `a`, so `_import_structure` read at the bottom was undefined and the
# backend branches clobbered each other instead of extending the map.
_import_structure = {
    """configuration_vision_text_dual_encoder""": ["""VisionTextDualEncoderConfig"""],
    """processing_vision_text_dual_encoder""": ["""VisionTextDualEncoderProcessor"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_vision_text_dual_encoder"""] = ["""VisionTextDualEncoderModel"""]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_flax_vision_text_dual_encoder"""] = ["""FlaxVisionTextDualEncoderModel"""]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_tf_vision_text_dual_encoder"""] = ["""TFVisionTextDualEncoderModel"""]

if TYPE_CHECKING:
    # Static type-checkers see the real imports.
    from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
    from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel

else:
    import sys

    # Replace this module with a lazy proxy so heavy backend imports happen
    # only on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 652 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE__ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> Dict:
    """Tokenize a single text line with padding/truncation to `max_length`.

    The original declared all parameters under one duplicated name (a
    SyntaxError) and dropped the ``tokenizer.padding_side`` assignment that
    ``padding_side`` exists to perform; both are restored below.
    """
    # BART's BPE needs an explicit leading space unless the line has one.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] ,
        max_length=max_length ,
        padding='max_length' if pad_to_max_length else None ,
        truncation=True ,
        return_tensors=return_tensors ,
        add_special_tokens=True ,
        **extra_kw ,
    )
def SCREAMING_SNAKE_CASE__ ( input_ids , pad_token_id , attention_mask=None ):
    """Remove columns of `input_ids` populated exclusively by `pad_token_id`.

    Args:
        input_ids: (batch, seq) tensor of token ids.
        pad_token_id: id whose all-pad columns should be dropped.
        attention_mask: optional (batch, seq) mask trimmed with the ids.

    Returns:
        The trimmed ids tensor, or a (ids, mask) tuple when a mask is given.
        (The original's wrong ``-> List[str]`` annotation was dropped.)

    The original declared its parameters under one duplicated name (a
    SyntaxError); the body below restores the names it already read.
    """
    # A column is kept if ANY row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_ ( SCREAMING_SNAKE_CASE ):
    """Line-aligned source/target dataset for seq2seq fine-tuning.

    NOTE(review): heavily mangled -- ``__init__`` declares one parameter name
    many times (a SyntaxError), locals are bound to ``__lowerCamelCase``
    while later lines read ``type_path``, ``max_source_length``,
    ``tokenizer``, ``prefix``, ``n_obs``, ``src_lang``/``tgt_lang``,
    ``source_line``/``tgt_line`` and others that are never assigned, and the
    static ``get_char_lens`` computes ``len(SCREAMING_SNAKE_CASE__)`` (the
    path) instead of ``len(x)`` per line. Only comments are added here.
    """
    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,):
        super().__init__()
        # <data_dir>/<type_path>.source and .target hold aligned text lines.
        __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source')
        __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target')
        __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file)
        __lowerCamelCase : List[Any] = max_source_length
        __lowerCamelCase : List[str] = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Optional[int] = prefix
        if n_obs is not None:
            # Optionally cap the dataset at the first n_obs examples.
            __lowerCamelCase : Dict = self.src_lens[:n_obs]
        __lowerCamelCase : str = src_lang
        __lowerCamelCase : Any = tgt_lang
    def __len__( self : Tuple):
        return len(self.src_lens)
    # Lazily read one source/target pair via linecache and tokenize both.
    def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
        __lowerCamelCase : Dict = index + 1 # linecache starts at 1
        __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        __lowerCamelCase : Dict = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        )
        __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right')
        __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right')
        __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }
    @staticmethod
    # Per-line character counts for the file at the given path.
    def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int):
        return [len(SCREAMING_SNAKE_CASE__) for x in Path(SCREAMING_SNAKE_CASE__).open().readlines()]
    # Collate: stack the per-example tensors and trim all-pad columns.
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
        __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch])
        __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch])
        __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch])
        __lowerCamelCase : Optional[int] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
# Module-level logger (bound to the throwaway name `a` by the original code).
a =getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    """Flatten one level of nesting, returning a single list of all elements."""
    flattened = []
    for chunk in lowerCamelCase__:
        flattened.extend(chunk )
    return flattened
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Write the current repository's git metadata to ``<folder>/git_log.json``.

    BUG FIX: the dict returned by ``get_git_info()`` was discarded and the folder
    path was passed both as the JSON payload and as the path component; the
    metadata is now actually what gets saved.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(lowerCamelCase__ , 'git_log.json' ) )
def SCREAMING_SNAKE_CASE__ ( content , path , indent=4 , **json_dump_kwargs ) -> None:
    """Serialize *content* as JSON to the file at *path*.

    BUG FIX: all parameters shared the name ``lowerCamelCase__`` — a SyntaxError
    (duplicate argument names) — and the arguments were wired to the wrong roles.
    Extra keyword args are forwarded to ``json.dump``.
    """
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    """Deserialize and return the JSON document stored at the given path."""
    with open(lowerCamelCase__ ) as handle:
        payload = json.load(handle )
    return payload
def SCREAMING_SNAKE_CASE__ ( ) -> dict:
    """Return metadata about the enclosing git repository: id, HEAD sha, branch, host.

    BUG FIX: the body referenced ``lowerCamelCase__`` although the function takes no
    parameters (NameError on every call); ``search_parent_directories`` is now
    ``True`` and the repo id is derived from the ``Repo`` object itself.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def SCREAMING_SNAKE_CASE__ ( fn , iterable ) -> list:
    """Eagerly apply *fn* to every element of *iterable* (a list-returning ``map``).

    BUG FIX: both parameters shared the name ``lowerCamelCase__``, which is a
    SyntaxError (duplicate argument names).
    """
    return list(map(fn , iterable ) )
def SCREAMING_SNAKE_CASE__ ( obj , path ) -> None:
    """Pickle *obj* to the file at *path*.

    BUG FIX: both parameters shared the name ``lowerCamelCase__`` (SyntaxError).
    """
    with open(path , 'wb' ) as f:
        # pickle.dump returns None; the return mirrors the original control flow.
        return pickle.dump(obj , f )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Normalize an answer string SQuAD-style: lower-case, strip punctuation and
    the articles a/an/the, and collapse whitespace.

    BUG FIX: the inner helpers took a parameter named ``lowerCamelCase__`` but
    their bodies read the undefined name ``text``, raising NameError on every
    call; the helpers now use their own parameter.
    """

    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ) -> float:
    """Token-level F1 between a predicted and a gold answer string (SQuAD metric).

    BUG FIX: both parameters shared the name ``lowerCamelCase__`` (SyntaxError);
    they are now distinct and wired to their real roles.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        # Also covers an empty prediction or gold answer, so the divisions
        # below can never raise ZeroDivisionError.
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ) -> bool:
    """True iff the two answers are identical after SQuAD-style normalization.

    BUG FIX: both parameters shared the name ``lowerCamelCase__`` (SyntaxError).
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def SCREAMING_SNAKE_CASE__ ( output_lns , reference_lns ) -> Dict:
    """Average exact-match score over parallel lists of predictions and references.

    BUG FIX: both parameters shared the name ``lowerCamelCase__`` (SyntaxError) and
    the per-pair call passed the same object twice instead of (hypo, pred).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
    """Return True when the given model name/prefix identifies a RAG model.

    BUG FIX: the body referenced the undefined name ``model_prefix`` instead of
    the function's parameter, raising NameError on every call.
    """
    return lowerCamelCase__.startswith('rag' )
def SCREAMING_SNAKE_CASE__ ( extra_params , hparams , config ):
    """Move any of *extra_params* that are set on *hparams* onto *config*.

    A parameter is copied under its own name when the config has that attribute,
    otherwise under its documented equivalent (T5 uses ``dropout_rate`` instead of
    ``dropout``); parameters the config cannot hold are dropped with a log line.
    Returns the (mutated) ``(hparams, config)`` pair.

    BUG FIXES: the three parameters shared one name (SyntaxError), and the
    ``equivalent_param["dropout"] = "dropout_rate"`` entry promised by the
    comment below had been lost, so the T5 fallback never worked.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 652 | 1 |
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Return a SHA-256 hex digest of *lowerCamelCase__* (a list of Python source
    lines), ignoring comments and empty lines — used as a module cache key.

    BUG FIXES: the loop iterated the undefined name ``lines``; the
    comment-stripped text was discarded (the truthiness test used the raw line
    and the append pushed the whole input list instead of one line); and the
    digest came from the non-existent ``hashlib.shaaaa``.
    """
    from hashlib import sha256  # local import: the module-level `shaaaa` import is bogus

    filtered_lines = []
    for line in lowerCamelCase__:
        line = re.sub(R'#.*' , '' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '\n'.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8' )
    return sha256(full_bytes ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): this dict and the later ones are all bound to the obfuscated name
# `a`, yet the code below reads `_hash_python_lines`, `_EXTENSION_TO_MODULE` and
# `_MODULE_TO_EXTENSIONS` — presumably the originals bound those names. Verify
# before relying on this module.
a ={
    """csv""": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    """json""": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    """pandas""": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    """parquet""": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    """arrow""": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    """text""": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    """imagefolder""": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    """audiofolder""": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
a ={
    """.csv""": ("""csv""", {}),
    """.tsv""": ("""csv""", {"""sep""": """\t"""}),
    """.json""": ("""json""", {}),
    """.jsonl""": ("""json""", {}),
    """.parquet""": ("""parquet""", {}),
    """.arrow""": ("""arrow""", {}),
    """.txt""": ("""text""", {}),
}
# Register every image/audio extension (lower- and upper-case) with its folder builder.
_EXTENSION_TO_MODULE.update({ext: ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""imagefolder""", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("""audiofolder""", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
a ={"""imagefolder""", """audiofolder"""}
# Used to filter data files based on extensions given a module name
a ={}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# Folder-based builders also accept zipped archives.
_MODULE_TO_EXTENSIONS["imagefolder"].append(""".zip""")
_MODULE_TO_EXTENSIONS["audiofolder"].append(""".zip""")
| 652 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
# Module logger and docstring/checkpoint constants for the MobileNetV1 model file.
# NOTE(review): every constant is bound to the obfuscated name `a`, each rebinding
# shadowing the previous one, while decorators below read `_CHECKPOINT_FOR_DOC`,
# `_CONFIG_FOR_DOC`, `_EXPECTED_OUTPUT_SHAPE`, `_IMAGE_CLASS_CHECKPOINT` and
# `_IMAGE_CLASS_EXPECTED_OUTPUT` — presumably the original names. Verify upstream.
a =logging.get_logger(__name__)
# General docstring
a ="""MobileNetV1Config"""
# Base docstring
a ="""google/mobilenet_v1_1.0_224"""
a =[1, 1024, 7, 7]
# Image classification docstring
a ="""google/mobilenet_v1_1.0_224"""
a ="""tabby, tabby cat"""
a =[
    """google/mobilenet_v1_1.0_224""",
    """google/mobilenet_v1_0.75_192""",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str:
    """Build the map from TensorFlow variable names to PyTorch parameters for a
    MobileNetV1 checkpoint (conv stem, 13 depthwise/pointwise pairs, classifier).
    """
    # NOTE(review): the def line repeats `lowerCamelCase__` three times — a
    # SyntaxError (duplicate argument names); originally (model, config, tf_weights).
    # NOTE(review): the repeated plain assignments below all rebind one throwaway
    # local — the original assigned into the map under string keys built from the
    # `MobilenetV1/...` prefixes, so the mapping itself is effectively lost here.
    # The body also reads `model`, `backbone`, `pt_index`, `tf_index`, `pointer`
    # and `tf_to_pt_map`, none of which are bound. Verify against the upstream
    # conversion script before relying on this function.
    __lowerCamelCase : str = {}
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : int = model.mobilenet_va
    else:
        __lowerCamelCase : List[str] = model
    __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/'
    __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight
    __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias
    __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight
    __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean
    __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var
    # Each of the 13 blocks is a depthwise conv layer followed by a pointwise one.
    for i in range(1_3 ):
        __lowerCamelCase : Any = i + 1
        __lowerCamelCase : Union[str, Any] = i * 2
        __lowerCamelCase : Optional[Any] = backbone.layer[pt_index]
        __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        __lowerCamelCase : Tuple = pointer.convolution.weight
        __lowerCamelCase : Optional[Any] = pointer.normalization.bias
        __lowerCamelCase : Union[str, Any] = pointer.normalization.weight
        __lowerCamelCase : List[str] = pointer.normalization.running_mean
        __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var
        __lowerCamelCase : int = backbone.layer[pt_index + 1]
        __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        __lowerCamelCase : Optional[Any] = pointer.convolution.weight
        __lowerCamelCase : Any = pointer.normalization.bias
        __lowerCamelCase : str = pointer.normalization.weight
        __lowerCamelCase : Dict = pointer.normalization.running_mean
        __lowerCamelCase : List[str] = pointer.normalization.running_var
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        __lowerCamelCase : Any = model.classifier.weight
        __lowerCamelCase : int = model.classifier.bias
    return tf_to_pt_map
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model,
    transposing conv kernels to PyTorch layout and dropping optimizer slots.
    """
    # NOTE(review): the def line repeats `lowerCamelCase__` (SyntaxError);
    # originally (model, config, tf_checkpoint_path). The obfuscated rebindings
    # below also discard the values the later code reads (`init_vars`,
    # `tf_weights`, `array`, `pointer.data`, …). Verify against upstream.
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ )
    __lowerCamelCase : List[str] = {}
    for name, shape in init_vars:
        logger.info(F"Loading TF weight {name} with shape {shape}" )
        __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
        __lowerCamelCase : List[Any] = array
    # Build TF to PyTorch weights loading map
    __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"Importing {name}" )
        if name not in tf_weights:
            logger.info(F"{name} not in tf pre-trained weights, skipping" )
            continue
        __lowerCamelCase : Optional[int] = tf_weights[name]
        # TF stores depthwise kernels as HWIO and regular kernels as HWIO too;
        # PyTorch expects OIHW, hence the transposes below.
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2: # copying into linear layer
                __lowerCamelCase : Any = array.squeeze().transpose()
            else:
                __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
        __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
        # Drop the consumed weight plus its RMSProp/EMA optimizer slots.
        tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ )
        tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ )
    logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def SCREAMING_SNAKE_CASE__ ( features , conv_layer ) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to *features* for *conv_layer*.

    Computes the total padding needed so the convolution covers the input exactly
    as TF would, splits it left/right and top/bottom (extra pixel on the
    right/bottom), and zero-pads the last two dimensions.

    BUG FIXES: both parameters shared the name ``lowerCamelCase__`` (SyntaxError),
    and every unpacking bound one throwaway local while the body read
    ``in_height``/``stride_height``/… which were never assigned (NameError).
    The names are restored from the right-hand sides and later uses.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )

    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    # F.pad takes (left, right, top, bottom) for the last two dims.
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
class A_ ( nn.Module ):
    """MobileNetV1 building block: convolution, then optional batch-norm, then an
    optional activation.

    BUG FIXES: every ``__init__`` parameter shared one obfuscated name although
    the call sites in this file pass keywords (``in_channels=``, ``stride=``,
    ``groups=``, …) — the names are restored from those call sites; the
    non-existent ``nn.Convad``/``nn.BatchNormad`` become ``nn.Conv2d``/
    ``nn.BatchNorm2d``; the ``self.*`` attribute assignments the forward pass
    reads (``config``/``convolution``/``normalization``/``activation``) are
    restored; and the activation ``isinstance`` checks test against ``str``
    (ACTaFN is keyed by activation-name strings).
    """

    def __init__( self ,config : "MobileNetVaConfig" ,in_channels : int ,out_channels : int ,kernel_size : int ,stride : int = 1 ,groups : int = 1 ,bias : bool = False ,use_normalization : bool = True ,use_activation = True ,):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # TF-style "SAME" padding is applied manually in the forward pass when
        # config.tf_padding is set, so the conv itself then pads nothing.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels ,out_channels=out_channels ,kernel_size=kernel_size ,stride=stride ,padding=padding ,groups=groups ,bias=bias ,padding_mode='zeros' ,)

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=True ,track_running_stats=True ,)
        else:
            self.normalization = None

        if use_activation:
            # use_activation may be True (fall back to config.hidden_act) or an
            # activation-name string that indexes ACTaFN directly.
            if isinstance(use_activation ,str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act ,str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def lowerCAmelCase ( self ,features : "torch.Tensor"):
        """Run conv -> norm -> activation; applies TF "SAME" padding when configured."""
        if self.config.tf_padding:
            # NOTE(review): `apply_tf_padding` is read from module scope; in this
            # revision the module-level helper carries an obfuscated name — verify.
            features = apply_tf_padding(features ,self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class A_ ( SCREAMING_SNAKE_CASE ):
    # Pretrained-model base wiring for MobileNetV1: config class, TF-checkpoint
    # loader, base-model prefix, main input name, and the weight-init hook.
    # NOTE(review): the base `SCREAMING_SNAKE_CASE` is not defined in this file
    # (originally PreTrainedModel), every class attribute is bound to the same
    # name `_UpperCAmelCase` (each silently overwriting the previous), and
    # `nn.Convad`/`nn.BatchNormad` and the unbound `module` below look like
    # obfuscation damage (Conv2d/BatchNorm2d and the method parameter). Verify
    # against the upstream modeling file.
    _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig
    _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va
    _UpperCAmelCase : List[str] = '''mobilenet_v1'''
    _UpperCAmelCase : Any = '''pixel_values'''
    _UpperCAmelCase : int = False
    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]):
        # Standard HF init: normal(0, initializer_range) for linear/conv weights,
        # zero bias; zeros/ones for batch-norm bias/weight.
        if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
a =r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    # MobileNetV1 backbone: conv stem, then 13 depthwise/pointwise conv pairs,
    # then optional global average pooling.
    # NOTE(review): the def lines reuse one parameter name (SyntaxError for the
    # multi-arg forward originally), the `__init__` locals all rebind one
    # obfuscated name, and later expressions read `config`, `depth`,
    # `out_channels`, `strides`, `hidden_states`, `last_hidden_state`,
    # `pooled_output` and `all_hidden_states`, which are never bound here —
    # presumably the original assignments. Verify against the upstream file.
    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = config
        # Stem: 3x3 stride-2 conv from num_channels to 32 * depth_multiplier.
        __lowerCamelCase : Optional[int] = 3_2
        __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth)
        __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer(
            SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,)
        # Per-block strides of the 13 depthwise convolutions.
        __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        __lowerCamelCase : str = nn.ModuleList()
        for i in range(1_3):
            __lowerCamelCase : str = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth)
            # Depthwise 3x3 conv (groups == channels) followed by a 1x1 pointwise conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,))
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,))
        __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        # Head pruning is not supported for this architecture.
        raise NotImplementedError
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward pass: stem, the 13 conv pairs (optionally collecting hidden
        # states), then optional pooling; returns tuple or ModelOutput.
        __lowerCamelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)
            if output_hidden_states:
                __lowerCamelCase : Any = all_hidden_states + (hidden_states,)
        __lowerCamelCase : Optional[Any] = hidden_states
        if self.pooler is not None:
            __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1)
        else:
            __lowerCamelCase : List[str] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__ ,)
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    # Classification head over the MobileNetV1 backbone: dropout + linear layer,
    # with the standard HF problem-type dispatch for the loss.
    # NOTE(review): the locals rebind one obfuscated name while later code reads
    # `self.num_labels`, `self.mobilenet_va`, `self.dropout`, `self.classifier`,
    # `outputs`, `logits`, `loss` and `output` — presumably the original
    # assignments. Verify against the upstream modeling file.
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = config.num_labels
        __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward pass: backbone -> pooled output -> dropout -> linear classifier;
        # loss selection follows config.problem_type (inferred from labels if unset).
        __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : List[str] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowerCamelCase : Dict = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowerCamelCase : int = 'single_label_classification'
                else:
                    __lowerCamelCase : Tuple = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __lowerCamelCase : Tuple = MSELoss()
                if self.num_labels == 1:
                    __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze())
                else:
                    __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            elif self.config.problem_type == "single_label_classification":
                __lowerCamelCase : List[str] = CrossEntropyLoss()
                __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __lowerCamelCase : int = BCEWithLogitsLoss()
                __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        if not return_dict:
            __lowerCamelCase : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger, then the map from model identifier to its hosted config URL.
# NOTE(review): both are bound to `a`, the dict shadowing the logger.
a =logging.get_logger(__name__)
a ={
    """transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class for Transformer-XL (``transfo-xl``).

    Stores the hyper-parameters that define a Transformer-XL architecture.

    BUG FIXES relative to the previous revision:
      * the three class attributes were all bound to the same name
        (``_UpperCAmelCase``), each silently overwriting the previous —
        restored to ``model_type`` / ``keys_to_ignore_at_inference`` /
        ``attribute_map``, the names the shown values correspond to;
      * every ``__init__`` parameter shared one name (a SyntaxError) — names
        restored from the right-hand sides used in the body and the defaults;
      * ``__init__`` rebound a throwaway local instead of setting ``self.*``;
      * the ``max_position_embeddings`` property getter had been renamed, so
        the ``@max_position_embeddings.setter`` decorator raised NameError at
        class-creation time.
    """

    model_type = '''transfo-xl'''
    keys_to_ignore_at_inference = ['''mems''']
    attribute_map = {
        '''n_token''': '''vocab_size''',
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__( self ,vocab_size=2_6_7_7_3_5 ,cutoffs=[2_0_0_0_0, 4_0_0_0_0, 2_0_0_0_0_0] ,d_model=1_0_2_4 ,d_embed=1_0_2_4 ,n_head=1_6 ,d_head=6_4 ,d_inner=4_0_9_6 ,div_val=4 ,pre_lnorm=False ,n_layer=1_8 ,mem_len=1_6_0_0 ,clamp_len=1_0_0_0 ,same_length=True ,proj_share_all_but_first=True ,attn_type=0 ,sample_softmax=-1 ,adaptive=True ,dropout=0.1 ,dropatt=0.0 ,untie_r=True ,init="normal" ,init_range=0.01 ,proj_init_std=0.01 ,init_std=0.02 ,layer_norm_epsilon=1E-5 ,eos_token_id=0 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        # Share all adaptive-softmax projections except the first cluster.
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id ,**kwargs)

    @property
    def max_position_embeddings( self ):
        # Message copied from Transformer-XL documentation
        logger.info(F"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings( self ,value ):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            F"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 652 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( img , pts_from , pts_to , rows , cols ) -> np.ndarray:
    """Warp *img* with the affine transform that maps the three points *pts_from*
    onto *pts_to*, producing an output of size (cols x rows).

    BUG FIX: all five parameters shared the name ``lowerCamelCase__``, which is a
    SyntaxError (duplicate argument names); they are restored in call order
    (image, source points, destination points, rows, cols).
    """
    matrix = cva.getAffineTransform(pts_from , pts_to )
    return cva.warpAffine(img , matrix , (rows, cols) )
if __name__ == "__main__":
# read original image
a =cva.imread(
str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
)
# turn image in gray scale value
a =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
a , a =gray_img.shape
# set different points to rotate image
a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
a =[
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
a =plt.figure(1)
a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the CLAP model package: the structure dict tells
# _LazyModule what each submodule exports; torch-only symbols are added when
# torch is available; under TYPE_CHECKING the real imports run for type checkers.
# NOTE(review): the structure dict and export lists are bound to `a`, but
# _LazyModule below reads `_import_structure` — presumably the original name.
a ={
    """configuration_clap""": [
        """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ClapAudioConfig""",
        """ClapConfig""",
        """ClapTextConfig""",
    ],
    """processing_clap""": ["""ClapProcessor"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a =[
        """CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ClapModel""",
        """ClapPreTrainedModel""",
        """ClapTextModel""",
        """ClapTextModelWithProjection""",
        """ClapAudioModel""",
        """ClapAudioModelWithProjection""",
    ]
    a =["""ClapFeatureExtractor"""]
if TYPE_CHECKING:
    from .configuration_clap import (
        CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
        ClapAudioConfig,
        ClapConfig,
        ClapTextConfig,
    )
    from .processing_clap import ClapProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_clap import ClapFeatureExtractor
        from .modeling_clap import (
            CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ClapAudioModel,
            ClapAudioModelWithProjection,
            ClapModel,
            ClapPreTrainedModel,
            ClapTextModel,
            ClapTextModelWithProjection,
        )
else:
    import sys
    # Replace this module in sys.modules with a lazily-loading proxy.
    a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
    """Return the n-th Proth number (3, 5, 9, 13, 17, 25, 33, ...).

    BUG FIXES: the type check called ``isinstance(number, number)``, which raises
    TypeError for every valid int input; and the generation loops iterated
    ``range(1, number)`` / ``range(number)`` instead of
    ``range(1, block_index)`` / ``range(increment)``, producing wrong values
    (e.g. the 6th Proth number came out as 21 instead of 25).

    Raises:
        TypeError: if the argument is not an int.
        ValueError: if the argument is < 1.
    """
    if not isinstance(lowerCamelCase__ , int ):
        message = F"Input value of [number={lowerCamelCase__}] must be an integer"
        raise TypeError(message )
    if lowerCamelCase__ < 1:
        message = F"Input value of [number={lowerCamelCase__}] must be > 0"
        raise ValueError(message )
    elif lowerCamelCase__ == 1:
        return 3
    elif lowerCamelCase__ == 2:
        return 5
    else:
        # Between 2**k and 2**(k+1) the Proth sequence gains `increment` terms,
        # with `increment` doubling per block; `block_index` counts the blocks
        # needed to reach the requested index.
        block_index = int(math.log(lowerCamelCase__ // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[lowerCamelCase__ - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
a =0
try:
a =proth(number)
except ValueError:
print(F"""ValueError: there is no {number}th Proth number""")
continue
print(F"""The {number}th Proth number: {value}""")
| 652 | 1 |
import torch
from transformers import AutoModel
class A_ ( torch.nn.Module ):
    # Few-shot NER span scorer: encodes query and support sentences with BERT and
    # scores each query token as a span start/end by similarity to the support
    # set's start/end marker embeddings.
    # NOTE(review): `super(SCREAMING_SNAKE_CASE__, self)` passes the pretrained
    # *name string* where a class belongs (TypeError at construction), and the
    # `__init__` locals rebind one obfuscated name while methods read
    # `self.bert`, `self.cos`, `self.softmax` and `self.BERT` — presumably the
    # original attribute assignments (and a `BERT` helper method). Verify
    # against the upstream fsner source before relying on this class.
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : Optional[int]="sayef/fsner-bert-base-uncased"):
        super(SCREAMING_SNAKE_CASE__ ,self).__init__()
        __lowerCamelCase : Optional[Any] = AutoModel.from_pretrained(SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = torch.nn.CosineSimilarity(3 ,1E-08)
        __lowerCamelCase : Tuple = torch.nn.Softmax(dim=1)
    def lowerCAmelCase ( self : Any ,**SCREAMING_SNAKE_CASE__ : Optional[int]):
        # Encode a batch and return the last hidden states.
        return self.bert(**SCREAMING_SNAKE_CASE__).last_hidden_state
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : int):
        # Sum token embeddings over the hidden dimension (keepdim as passed).
        return token_embeddings.sum(2 ,keepdim=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1):
        # Temperature-scaled softmax over cosine similarities.
        return self.softmax(T * self.cos(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str]):
        # Forward: per support example, pick the encoder states at the special
        # start/end marker tokens and score each query token against them.
        __lowerCamelCase : Any = W_supports['sizes'].tolist()
        __lowerCamelCase : List[str] = W_supports['start_token_id'].item()
        __lowerCamelCase : List[Any] = W_supports['end_token_id'].item()
        # Remove bookkeeping keys before handing the dict to the encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]
        __lowerCamelCase : Optional[int] = self.BERT(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = self.BERT(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = None
        __lowerCamelCase : int = None
        __lowerCamelCase : int = W_supports['input_ids'] == start_token_id
        __lowerCamelCase : Dict = W_supports['input_ids'] == end_token_id
        for i, size in enumerate(SCREAMING_SNAKE_CASE__):
            if i == 0:
                __lowerCamelCase : int = 0
            else:
                __lowerCamelCase : str = support_sizes[i - 1]
            # Slice this example's support rows and keep only marker positions.
            __lowerCamelCase : Dict = S[s : s + size][start_token_masks[s : s + size]]
            __lowerCamelCase : Optional[int] = S[s : s + size][end_token_masks[s : s + size]]
            __lowerCamelCase : Optional[Any] = torch.matmul(q[i] ,s_start.T).sum(1).softmax(0)
            __lowerCamelCase : Tuple = torch.matmul(q[i] ,s_end.T).sum(1).softmax(0)
            if p_starts is not None:
                __lowerCamelCase : Tuple = torch.vstack((p_starts, p_start))
                __lowerCamelCase : Dict = torch.vstack((p_ends, p_end))
            else:
                __lowerCamelCase : Any = p_start
                __lowerCamelCase : Dict = p_end
        return p_starts, p_ends
| 652 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class DetaImageProcessingTester(unittest.TestCase):
    """Holds the knobs for building a ``DetaImageProcessor`` and computes the
    spatial size the processor is expected to produce for given images.

    Bug fix: the original ``__init__`` declared one mangled name for every
    parameter (a SyntaxError) and rebound all attributes to
    ``__lowerCamelCase`` while methods read the real names; the class is also
    restored to the name the sibling test class instantiates
    (``DetaImageProcessingTester``).
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=3_0,
        max_resolution=4_0_0,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        do_rescale=True,
        rescale_factor=1 / 2_5_5,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        self.size = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        # None defaults instead of mutable list defaults (behavior unchanged).
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Keyword arguments used to construct the image processor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Expected ``(height, width)`` after shortest-edge resizing.

        For a batch, returns the max height / max width over the individual
        expected sizes (the padded batch shape).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                width, height = image.size
            else:
                # tensors/arrays are channel-first: (C, H, W)
                height, width = image.shape[1], image.shape[2]
            if width < height:
                expected_height = int(self.size['shortest_edge'] * height / width)
                expected_width = self.size['shortest_edge']
            elif width > height:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * width / height)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test suite for ``DetaImageProcessor``: attribute/property checks, PIL /
    numpy / torch batching behavior, and (slow) COCO detection and panoptic
    annotation round-trips.

    NOTE(review): this block is machine-mangled — assignment targets were
    rewritten to ``__lowerCamelCase`` (repeatedly rebound) while later lines
    still read the original names (e.g. ``self.image_processor_tester``,
    ``image_processor``, ``expected_height``), method names collapsed to
    ``lowerCAmelCase``, and the mixin base is the undefined name
    ``SCREAMING_SNAKE_CASE``. Code is left byte-identical; the original
    identifiers must be restored before it can run — TODO confirm against the
    upstream DETA image-processing test.
    """

    # Processor class exercised by the saving-test mixin; None without vision deps.
    _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self : Optional[Any]):
        # setUp-style hook: builds the shared tester (result is meant to land
        # in ``self.image_processor_tester`` — target name mangled).
        __lowerCamelCase : List[str] = DetaImageProcessingTester(self)

    @property
    def lowerCAmelCase ( self : Any):
        # Convenience accessor for the tester's image-processor kwargs.
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : Dict):
        # The processor must expose all expected configuration attributes.
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))

    def lowerCAmelCase ( self : str):
        # Round-trip the kwargs through ``from_dict`` and check defaults.
        __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
        self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        # Intentionally empty (placeholder overriding a mixin test).
        pass

    def lowerCAmelCase ( self : List[str]):
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
        # Test not batched input
        __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : str):
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
        # Test not batched input
        __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : int):
        # Initialize image_processing
        __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
        # Test not batched input
        __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    @slow
    def lowerCAmelCase ( self : Optional[Any]):
        # Slow integration test: COCO *detection* annotations round-trip,
        # pinned against reference values (needs local COCO fixtures).
        # prepare image and target
        __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
            __lowerCamelCase : List[str] = json.loads(f.read())
        __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        __lowerCamelCase : Optional[int] = DetaImageProcessor()
        __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : int = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify orig_size
        __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))

    @slow
    def lowerCAmelCase ( self : str):
        # Slow integration test: COCO *panoptic* annotations (with masks)
        # round-trip, pinned against reference values.
        # prepare image, target and masks_path
        __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
            __lowerCamelCase : Tuple = json.loads(f.read())
        __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
        __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : Tuple = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : int = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify masks
        __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
        # verify orig_size
        __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt") -> "Dict":
    """Tokenize a single text line, padded/truncated to ``max_length``.

    Bug fix: the original declared one mangled name for all six parameters (a
    SyntaxError), referenced undefined ``line``/``padding_side``, and dropped
    the ``tokenizer.padding_side`` assignment into a throwaway local. Names
    are restored; the function is named ``encode_line`` because the dataset
    class below calls it by that name.
    """
    # BART's BPE treats a leading space as significant, so request one when missing.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(' ') else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding='max_length' if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Drop trailing all-padding columns from a batch of token ids.

    Bug fix: the original declared duplicate parameter names (a SyntaxError)
    and referenced undefined ``input_ids``/``attention_mask``; restored, and
    named ``trim_batch`` because the dataset's ``collate_fn`` calls it so.

    Returns the trimmed ids, or an ``(ids, attention_mask)`` pair when a mask
    is supplied.
    """
    # Keep any column that contains at least one non-pad token.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    """Line-by-line seq2seq dataset: reads ``<type_path>.source`` /
    ``<type_path>.target`` lazily via ``linecache`` and tokenizes pairs on
    access.

    Bug fix: the original subclassed the undefined name ``SCREAMING_SNAKE_CASE``
    (should be ``torch.utils.data.Dataset``), declared duplicate ``__init__``
    parameters (a SyntaxError), gave both helper methods the same mangled name
    (the second shadowed the first) while ``__init__`` calls
    ``self.get_char_lens``, and rebound every local/attribute to a mangled
    name read back under the real one. All names are restored.
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Truncate to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        """Tokenize the ``index``-th source/target line pair."""
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, 'right')
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, 'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        # Character length of every line; used for length-based filtering.
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim trailing padding columns."""
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
a =getLogger(__name__)
def flatten(list_of_lists) -> "Any":
    """Flatten one level of nesting: ``[[a, b], [c]] -> [a, b, c]``.

    Renamed from the shared mangled name (which later defs shadowed, making
    this function unreachable).
    """
    return list(itertools.chain.from_iterable(list_of_lists))
def save_git_info(folder_path) -> None:
    """Snapshot git metadata (repo id, sha, branch, hostname) into
    ``<folder_path>/git_log.json``.

    Bug fix: the original wrote the *folder path* as the JSON payload instead
    of the collected repo info; it now saves ``repo_infos``.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, 'git_log.json'))
def save_json(content, path, indent=4, **json_dump_kwargs) -> "List[str]":
    """Serialize ``content`` as JSON to ``path``.

    Bug fix: the original declared duplicate parameter names (a SyntaxError);
    restored, and named ``save_json`` because ``save_git_info`` calls it so.
    Extra keyword arguments are forwarded to ``json.dump``.
    """
    with open(path, 'w') as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path) -> "Tuple":
    """Parse and return the JSON document stored at ``path``.

    Renamed to ``load_json`` (the original shared mangled name was shadowed by
    every later definition, leaving this function unreachable).
    """
    with open(path) as f:
        return json.load(f)
def get_git_info() -> "List[str]":
    """Collect repository metadata from the surrounding git checkout.

    Bug fix: the original (parameterless) function referenced the undefined
    name ``lowerCamelCase__`` for both the ``search_parent_directories`` flag
    (should be ``True``) and the repo id (should be the ``Repo`` object).
    Named ``get_git_info`` because ``save_git_info`` calls it so.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(fn, iterable) -> "List":
    """``map`` eagerly evaluated into a list.

    Bug fix: the original declared duplicate parameter names (a SyntaxError).
    """
    return list(map(fn, iterable))
def pickle_save(obj, path) -> "Optional[int]":
    """Pickle ``obj`` to ``path``.

    Bug fix: the original declared duplicate parameter names (a SyntaxError).
    """
    with open(path, 'wb') as f:
        return pickle.dump(obj, f)
def normalize_answer(s) -> str:
    """SQuAD-style answer normalization: lower-case, strip punctuation and
    articles (a/an/the), and collapse whitespace.

    Bug fix: the inner helpers took a mangled parameter name but their bodies
    read the original name ``text`` (NameError on every call); restored.
    Named ``normalize_answer`` because the F1/EM scorers call it so.
    """

    def remove_articles(text):
        return re.sub(R'\b(a|an|the)\b', ' ', text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth) -> "int":
    """Token-level F1 between a predicted and a gold answer string, computed
    on normalized tokens. Returns 0 when there is no overlap.

    Bug fix: the original declared duplicate parameter names (a SyntaxError)
    and rebound every local to a mangled name read back under the real one.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    # Multiset intersection of tokens.
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def exact_match_score(prediction, ground_truth) -> "Dict":
    """True when the normalized prediction equals the normalized gold answer.

    Bug fix: the original declared duplicate parameter names (a SyntaxError).
    Named ``exact_match_score`` because the EM aggregator calls it so.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns) -> "Dict":
    """Average exact-match score over paired output/reference lines.

    Bug fix: the original declared duplicate parameter names (a SyntaxError)
    and rebound the accumulator to a mangled name while updating ``em``.
    Returns ``{"em": <fraction in [0, 1]>}``.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix) -> bool:
    """True when the model-name prefix denotes a RAG model.

    Bug fix: the original body read the undefined name ``model_prefix`` while
    the parameter carried a mangled name; the parameter is restored.
    """
    return model_prefix.startswith('rag')
def set_extra_model_params(extra_params, hparams, config) -> "Optional[Any]":
    """Move selected hyper-parameters from ``hparams`` onto ``config``.

    For each name in ``extra_params`` with a truthy value on ``hparams``, set
    it on ``config`` (falling back to a known equivalent attribute name) and
    remove it from ``hparams``. Unknown attributes are logged and dropped.

    Bug fix: the original declared duplicate parameter names (a SyntaxError)
    and rebound ``equivalent_param``/``set_p`` to mangled names read back
    under the real ones.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info('config doesn\'t have a `{}` attribute'.format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast pipeline tests for ``KandinskyVaaControlnetPipeline`` with tiny
    dummy UNet/VQ models, checked against pinned output slices.

    NOTE(review): this block is machine-mangled — assignment targets were
    rewritten to ``__lowerCamelCase`` while later lines read the original
    names (``model``, ``unet``, ``movq``, ``scheduler``, ``generator``, ...),
    call-site arguments collapsed to ``SCREAMING_SNAKE_CASE__`` (which is also
    the undefined mixin base). Code is left byte-identical; the original
    identifiers must be restored before it can run — TODO confirm against the
    upstream Kandinsky 2.2 controlnet test.
    """

    # Pipeline class under test and the parameter lists the mixin checks.
    _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
    _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase : Tuple = False

    @property
    def lowerCAmelCase ( self : Tuple):
        # text-embedder hidden size used by the dummy models
        return 3_2

    @property
    def lowerCAmelCase ( self : List[Any]):
        # time-embedding input dim
        return 3_2

    @property
    def lowerCAmelCase ( self : str):
        # cross-attention dim mirrors the time input dim
        return self.time_input_dim

    @property
    def lowerCAmelCase ( self : List[str]):
        # time-projection dim
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase ( self : List[str]):
        # number of inference timesteps used by the dummy scheduler config
        return 1_0_0

    @property
    def lowerCAmelCase ( self : Dict):
        # Builds a tiny hint-conditioned UNet with a fixed seed.
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
        return model

    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        # Constructor kwargs for the tiny VQ decoder.
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def lowerCAmelCase ( self : Optional[Any]):
        # Builds the tiny VQ model with a fixed seed.
        torch.manual_seed(0)
        __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
        return model

    def lowerCAmelCase ( self : Optional[Any]):
        # Assembles the pipeline components (unet, scheduler, movq).
        __lowerCamelCase : Tuple = self.dummy_unet
        __lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
        # Deterministic dummy pipeline inputs (embeds, hint, generator, sizes).
        __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            SCREAMING_SNAKE_CASE__)
        # create hint
        __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            # mps does not support device-bound generators
            __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def lowerCAmelCase ( self : Optional[Any]):
        # End-to-end CPU smoke test: output slice pinned against reference values,
        # and tuple-return path must match the dict-return path.
        __lowerCamelCase : Dict = 'cpu'
        __lowerCamelCase : Tuple = self.get_dummy_components()
        __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = output.images
        __lowerCamelCase : Tuple = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __lowerCamelCase : List[str] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration test: depth-controlnet Kandinsky 2.2 generation
    compared against a reference image by mean pixel difference.

    NOTE(review): machine-mangled like the rest of the file — assignment
    targets rewritten to ``__lowerCamelCase`` while later lines read the
    original names, and the teardown hook is named ``lowerCAmelCase`` (it must
    be ``tearDown`` for unittest to invoke it automatically). Code is left
    byte-identical.
    """

    def lowerCAmelCase ( self : int):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self : int):
        # End-to-end: prior pipeline produces image embeds, controlnet pipeline
        # renders with a depth hint; result compared to a stored reference.
        __lowerCamelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        __lowerCamelCase : Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # Convert the hint image to a (1, C, H, W) float tensor in [0, 1].
        __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
        __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
        __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
        pipe_prior.to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
        __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = 'A robot, 4k photo'
        __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
            SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase : Any = pipeline(
            image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
        __lowerCamelCase : List[Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 652 | 1 |
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_0_0_0_0_0_0) -> int:
    """Project Euler 85: area of the grid whose rectangle count is closest to
    ``target``.

    An ``a x b`` grid contains ``T(a) * T(b)`` rectangles, where ``T(n)`` is
    the n-th triangle number, so we search pairs of triangle numbers whose
    product is closest to ``target``.

    Bug fix: every local in the original was rebound to ``__lowerCamelCase``
    while the loop read the real names (``triangle_numbers``, ``best_product``,
    ...); the names are restored, and the function is named ``solution``
    because the ``__main__`` guard calls it so.
    """
    triangle_numbers: list[int] = [0]
    idx: int
    # Precompute enough triangle numbers; the 1.1 factor gives headroom so the
    # b_ceil index below never runs off the end.
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        # an estimate of b, from solving T(b) * triangle_a = target (quadratic formula)
        b_estimate: float = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor: int = floor(b_estimate)
        b_ceil: int = ceil(b_estimate)
        # the triangle numbers bracketing the estimate
        triangle_b_first_guess: int = triangle_numbers[b_floor]
        triangle_b_second_guess: int = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product
        ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
    # Print the answer using the f-string debug (`=`) specifier (Python 3.8+).
    print(F"""{solution() = }""")
| 652 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A_ :
    """Test helper that builds XGLM configs and dummy inputs for the TF tests.

    NOTE(review): names here look machine-mangled — every method is called
    ``lowerCAmelCase`` (so later definitions shadow earlier ones in the class
    namespace), ``__init__``'s parameters are all ``SCREAMING_SNAKE_CASE__``
    while its body reads the original names (``parent``, ``batch_size``, ...),
    and ``self.get_config`` / ``self.hidden_size`` used below are never
    defined under those names.  Verify against the upstream ``transformers``
    XGLM test file before relying on this class.
    """
    # Config class exercised by the common config tests.
    _UpperCAmelCase : int = XGLMConfig
    # Extra config kwargs (none needed for XGLM).
    _UpperCAmelCase : List[Any] = {}
    # Activation function name used throughout the tester.
    _UpperCAmelCase : Tuple = '''gelu'''
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,):
        # Store the sizing / hyper-parameter knobs used when building configs
        # and dummy inputs below.
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : str = seq_length
        __lowerCamelCase : Optional[Any] = is_training
        __lowerCamelCase : Any = use_input_mask
        __lowerCamelCase : str = use_labels
        __lowerCamelCase : Any = vocab_size
        __lowerCamelCase : Dict = d_model
        __lowerCamelCase : int = num_hidden_layers
        __lowerCamelCase : List[Any] = num_attention_heads
        __lowerCamelCase : List[str] = ffn_dim
        __lowerCamelCase : Optional[Any] = activation_function
        __lowerCamelCase : Tuple = activation_dropout
        __lowerCamelCase : Union[str, Any] = attention_dropout
        __lowerCamelCase : List[str] = max_position_embeddings
        __lowerCamelCase : List[Any] = initializer_range
        # Special token ids used when building the config.
        __lowerCamelCase : Any = None
        __lowerCamelCase : List[str] = 0
        __lowerCamelCase : List[str] = 2
        __lowerCamelCase : Dict = 1
    # Returns a pretrained reference config (requires network access).
    def lowerCAmelCase ( self : Any):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')
    # Builds a config plus dummy inputs (token ids, attention mask, head mask).
    def lowerCAmelCase ( self : str):
        __lowerCamelCase : Any = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3)
        __lowerCamelCase : Dict = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
        # NOTE(review): `self.get_config` is not defined under that name here.
        __lowerCamelCase : int = self.get_config()
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    # Builds a fresh XGLMConfig from the tester's hyper-parameters.
    def lowerCAmelCase ( self : List[Any]):
        return XGLMConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,)
    # Adapts the prepared config/inputs into the dict shape the common
    # transformers test mixins expect.
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Any = config_and_inputs
        __lowerCamelCase : str = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common TF model-test suite for XGLM.

    NOTE(review): the base classes are mangled to ``SCREAMING_SNAKE_CASE``
    (presumably ``TFModelTesterMixin`` / ``PipelineTesterMixin`` imported at
    the top of this section) and ``setUp`` below instantiates the undefined
    name ``TFXGLMModelTester`` — confirm against the upstream test file.
    """
    # Model classes under test / generative subset / pipeline mapping.
    _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
    _UpperCAmelCase : str = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # Feature flags consumed by the common test mixin (all disabled).
    _UpperCAmelCase : Tuple = False
    _UpperCAmelCase : Optional[int] = False
    _UpperCAmelCase : Union[str, Any] = False
    # Set up the shared model tester and config tester.
    def lowerCAmelCase ( self : Tuple):
        __lowerCamelCase : Tuple = TFXGLMModelTester(self)
        __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7)
    # Run the shared config sanity checks.
    def lowerCAmelCase ( self : List[Any]):
        self.config_tester.run_common_tests()
    # Smoke-test loading a pretrained checkpoint (network access required).
    @slow
    def lowerCAmelCase ( self : str):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
    # Embedding-resize test disabled upstream pending a refactor.
    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def lowerCAmelCase ( self : Union[str, Any]):
        super().test_resize_token_embeddings()
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration tests for TF XGLM generation (greedy, sampled, batched).

    NOTE(review): method and local names are machine-mangled (``lowerCAmelCase``,
    ``__lowerCamelCase``); all tests download ``facebook/xglm-564M`` and thus
    need network access.
    """
    # Greedy generation must reproduce a fixed token sequence.
    @slow
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True):
        __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa) # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__)
    # Seeded sampling on CPU must reproduce a fixed decoded string.
    @slow
    def lowerCAmelCase ( self : List[str]):
        __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf')
        __lowerCamelCase : List[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0])
        __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    # Left-padded batched generation must match per-sentence generation.
    @slow
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = 'left'
        # use different length sentences to test batching
        __lowerCamelCase : List[str] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = inputs['input_ids']
        __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2)
        __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
| 652 | 1 |
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output container holding a single float tensor.

    NOTE(review): the base class placeholder ``SCREAMING_SNAKE_CASE`` is
    undefined in this file (presumably ``BaseOutput`` imported above) and the
    field name looks machine-mangled.
    """
    # The output tensor carried by this dataclass.
    _UpperCAmelCase : torch.FloatTensor
class A_ ( nn.Module ):
    """VAE-style encoder: conv-in -> down blocks -> mid block -> norm/act/conv-out.

    NOTE(review): local/attribute names are machine-mangled — every
    intermediate is assigned to ``__lowerCamelCase`` while later code reads
    names such as ``self.layers_per_block`` / ``self.down_blocks`` that are
    never bound under those names, so the module cannot run as written.
    Compare with diffusers' ``models.vae.Encoder``.
    """
    def __init__( self : str ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=("DownEncoderBlock2D",) ,SCREAMING_SNAKE_CASE__ : Optional[Any]=(6_4,) ,SCREAMING_SNAKE_CASE__ : Dict=2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3_2 ,SCREAMING_SNAKE_CASE__ : int="silu" ,SCREAMING_SNAKE_CASE__ : int=True ,):
        super().__init__()
        __lowerCamelCase : Tuple = layers_per_block
        # Input projection to the first block's channel width.
        __lowerCamelCase : Union[str, Any] = torch.nn.Convad(
            SCREAMING_SNAKE_CASE__ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : str = nn.ModuleList([])
        # down
        __lowerCamelCase : str = block_out_channels[0]
        for i, down_block_type in enumerate(SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : Union[str, Any] = output_channel
            __lowerCamelCase : Optional[Any] = block_out_channels[i]
            # Final block keeps resolution (add_downsample is negated below).
            __lowerCamelCase : int = i == len(SCREAMING_SNAKE_CASE__) - 1
            __lowerCamelCase : Optional[Any] = get_down_block(
                SCREAMING_SNAKE_CASE__ ,num_layers=self.layers_per_block ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,add_downsample=not is_final_block ,resnet_eps=1E-6 ,downsample_padding=0 ,resnet_act_fn=SCREAMING_SNAKE_CASE__ ,resnet_groups=SCREAMING_SNAKE_CASE__ ,attention_head_dim=SCREAMING_SNAKE_CASE__ ,temb_channels=SCREAMING_SNAKE_CASE__ ,)
            self.down_blocks.append(SCREAMING_SNAKE_CASE__)
        # mid
        __lowerCamelCase : Optional[int] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=SCREAMING_SNAKE_CASE__ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=SCREAMING_SNAKE_CASE__ ,temb_channels=SCREAMING_SNAKE_CASE__ ,)
        # out
        __lowerCamelCase : Optional[int] = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=SCREAMING_SNAKE_CASE__ ,eps=1E-6)
        __lowerCamelCase : Any = nn.SiLU()
        # `double_z` doubles the output channels — presumably to carry both
        # mean and log-variance of a latent distribution; TODO confirm.
        __lowerCamelCase : Optional[int] = 2 * out_channels if double_z else out_channels
        __lowerCamelCase : str = nn.Convad(block_out_channels[-1] ,SCREAMING_SNAKE_CASE__ ,3 ,padding=1)
        __lowerCamelCase : List[str] = False
    # Forward pass; uses torch gradient checkpointing during training when
    # `gradient_checkpointing` is enabled.
    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str):
        __lowerCamelCase : str = x
        __lowerCamelCase : str = self.conv_in(SCREAMING_SNAKE_CASE__)
        if self.training and self.gradient_checkpointing:
            # Wrap each submodule so torch.utils.checkpoint can re-run it.
            def create_custom_forward(SCREAMING_SNAKE_CASE__ : Optional[int]):
                def custom_forward(*SCREAMING_SNAKE_CASE__ : int):
                    return module(*SCREAMING_SNAKE_CASE__)
                return custom_forward
            # down
            if is_torch_version('>=' ,'1.11.0'):
                for down_block in self.down_blocks:
                    __lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,use_reentrant=SCREAMING_SNAKE_CASE__)
                # middle
                __lowerCamelCase : Union[str, Any] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block) ,SCREAMING_SNAKE_CASE__ ,use_reentrant=SCREAMING_SNAKE_CASE__)
            else:
                # Older torch lacks the `use_reentrant` keyword.
                for down_block in self.down_blocks:
                    __lowerCamelCase : Tuple = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
                # middle
                __lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block) ,SCREAMING_SNAKE_CASE__)
        else:
            # down
            for down_block in self.down_blocks:
                __lowerCamelCase : Any = down_block(SCREAMING_SNAKE_CASE__)
            # middle
            __lowerCamelCase : Any = self.mid_block(SCREAMING_SNAKE_CASE__)
        # post-process
        __lowerCamelCase : List[Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = self.conv_act(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = self.conv_out(SCREAMING_SNAKE_CASE__)
        return sample
class A_ ( nn.Module ):
    """VAE-style decoder: conv-in -> mid block -> up blocks -> norm/act/conv-out.

    NOTE(review): names are machine-mangled (assignments to
    ``__lowerCamelCase``; attributes like ``self.up_blocks`` read but never
    bound under that name) — compare with diffusers' ``models.vae.Decoder``.
    """
    def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : List[str]=3 ,SCREAMING_SNAKE_CASE__ : Dict=("UpDecoderBlock2D",) ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=(6_4,) ,SCREAMING_SNAKE_CASE__ : str=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]="silu" ,SCREAMING_SNAKE_CASE__ : int="group" ,):
        super().__init__()
        __lowerCamelCase : Tuple = layers_per_block
        # Latent projection to the widest block's channel count.
        __lowerCamelCase : Dict = nn.Convad(
            SCREAMING_SNAKE_CASE__ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
        __lowerCamelCase : List[str] = None
        __lowerCamelCase : Optional[int] = nn.ModuleList([])
        # Extra conditioning channels — presumably for SpatialNorm when
        # norm_type == 'spatial'; TODO confirm.
        __lowerCamelCase : Optional[Any] = in_channels if norm_type == 'spatial' else None
        # mid
        __lowerCamelCase : Optional[int] = UNetMidBlockaD(
            in_channels=block_out_channels[-1] ,resnet_eps=1E-6 ,resnet_act_fn=SCREAMING_SNAKE_CASE__ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=SCREAMING_SNAKE_CASE__ ,temb_channels=SCREAMING_SNAKE_CASE__ ,)
        # up
        __lowerCamelCase : str = list(reversed(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : List[Any] = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : Optional[int] = output_channel
            __lowerCamelCase : Dict = reversed_block_out_channels[i]
            # Final block keeps resolution (add_upsample is negated below).
            __lowerCamelCase : List[Any] = i == len(SCREAMING_SNAKE_CASE__) - 1
            __lowerCamelCase : Any = get_up_block(
                SCREAMING_SNAKE_CASE__ ,num_layers=self.layers_per_block + 1 ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,prev_output_channel=SCREAMING_SNAKE_CASE__ ,add_upsample=not is_final_block ,resnet_eps=1E-6 ,resnet_act_fn=SCREAMING_SNAKE_CASE__ ,resnet_groups=SCREAMING_SNAKE_CASE__ ,attention_head_dim=SCREAMING_SNAKE_CASE__ ,temb_channels=SCREAMING_SNAKE_CASE__ ,resnet_time_scale_shift=SCREAMING_SNAKE_CASE__ ,)
            self.up_blocks.append(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Union[str, Any] = output_channel
        # out
        if norm_type == "spatial":
            __lowerCamelCase : Optional[int] = SpatialNorm(block_out_channels[0] ,SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : Any = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=SCREAMING_SNAKE_CASE__ ,eps=1E-6)
        __lowerCamelCase : Tuple = nn.SiLU()
        __lowerCamelCase : int = nn.Convad(block_out_channels[0] ,SCREAMING_SNAKE_CASE__ ,3 ,padding=1)
        __lowerCamelCase : List[str] = False
    # Forward pass; the second argument conditions the output norm when given.
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[str]=None):
        __lowerCamelCase : int = z
        __lowerCamelCase : Optional[Any] = self.conv_in(SCREAMING_SNAKE_CASE__)
        # Dtype of the up-block parameters; the sample is cast to it after
        # the checkpointed mid block below.
        __lowerCamelCase : Any = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:
            # Wrap each submodule so torch.utils.checkpoint can re-run it.
            def create_custom_forward(SCREAMING_SNAKE_CASE__ : List[Any]):
                def custom_forward(*SCREAMING_SNAKE_CASE__ : List[str]):
                    return module(*SCREAMING_SNAKE_CASE__)
                return custom_forward
            if is_torch_version('>=' ,'1.11.0'):
                # middle
                __lowerCamelCase : Dict = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,use_reentrant=SCREAMING_SNAKE_CASE__)
                __lowerCamelCase : str = sample.to(SCREAMING_SNAKE_CASE__)
                # up
                for up_block in self.up_blocks:
                    __lowerCamelCase : List[str] = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,use_reentrant=SCREAMING_SNAKE_CASE__)
            else:
                # Older torch lacks the `use_reentrant` keyword.
                # middle
                __lowerCamelCase : Optional[int] = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
                __lowerCamelCase : int = sample.to(SCREAMING_SNAKE_CASE__)
                # up
                for up_block in self.up_blocks:
                    __lowerCamelCase : Any = torch.utils.checkpoint.checkpoint(create_custom_forward(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        else:
            # middle
            __lowerCamelCase : List[str] = self.mid_block(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Dict = sample.to(SCREAMING_SNAKE_CASE__)
            # up
            for up_block in self.up_blocks:
                __lowerCamelCase : Tuple = up_block(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        # post-process
        if latent_embeds is None:
            __lowerCamelCase : int = self.conv_norm_out(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : Optional[Any] = self.conv_norm_out(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = self.conv_act(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = self.conv_out(SCREAMING_SNAKE_CASE__)
        return sample
class A_ ( nn.Module ):
    """Vector-quantisation layer (VQ-VAE style codebook) with optional index remap.

    NOTE(review): assignments are mangled to ``__lowerCamelCase`` while the
    methods read attributes such as ``self.n_e`` / ``self.embedding`` /
    ``self.remap`` that are never bound under those names — compare with
    diffusers' ``VectorQuantizer``.
    """
    def __init__( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ,SCREAMING_SNAKE_CASE__ : Any="random" ,SCREAMING_SNAKE_CASE__ : Optional[Any]=False ,SCREAMING_SNAKE_CASE__ : Dict=True):
        super().__init__()
        __lowerCamelCase : Tuple = n_e
        __lowerCamelCase : List[str] = vq_embed_dim
        __lowerCamelCase : Any = beta
        __lowerCamelCase : Tuple = legacy
        # Codebook embeddings, uniformly initialised in [-1/n_e, 1/n_e].
        __lowerCamelCase : Any = nn.Embedding(self.n_e ,self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e)
        # Optional .npy file restricting the usable codebook indices.
        __lowerCamelCase : Tuple = remap
        if self.remap is not None:
            self.register_buffer('used' ,torch.tensor(np.load(self.remap)))
            __lowerCamelCase : Union[str, Any] = self.used.shape[0]
            __lowerCamelCase : str = unknown_index # "random" or "extra" or integer
            if self.unknown_index == "extra":
                __lowerCamelCase : Optional[int] = self.re_embed
                __lowerCamelCase : Union[str, Any] = self.re_embed + 1
            print(
                F"Remapping {self.n_e} indices to {self.re_embed} indices. "
                F"Using {self.unknown_index} for unknown indices.")
        else:
            __lowerCamelCase : int = n_e
        __lowerCamelCase : Optional[int] = sane_index_shape
    # Map raw codebook indices onto the restricted `used` index set; indices
    # not in `used` are replaced per `unknown_index` policy.
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
        __lowerCamelCase : str = inds.shape
        assert len(SCREAMING_SNAKE_CASE__) > 1
        __lowerCamelCase : str = inds.reshape(ishape[0] ,-1)
        __lowerCamelCase : Any = self.used.to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = (inds[:, :, None] == used[None, None, ...]).long()
        __lowerCamelCase : Any = match.argmax(-1)
        __lowerCamelCase : Dict = match.sum(2) < 1
        if self.unknown_index == "random":
            __lowerCamelCase : List[Any] = torch.randint(0 ,self.re_embed ,size=new[unknown].shape).to(device=new.device)
        else:
            __lowerCamelCase : Optional[int] = self.unknown_index
        return new.reshape(SCREAMING_SNAKE_CASE__)
    # Inverse of the mapping above: recover original codebook indices.
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        __lowerCamelCase : Union[str, Any] = inds.shape
        assert len(SCREAMING_SNAKE_CASE__) > 1
        __lowerCamelCase : int = inds.reshape(ishape[0] ,-1)
        __lowerCamelCase : List[Any] = self.used.to(SCREAMING_SNAKE_CASE__)
        if self.re_embed > self.used.shape[0]: # extra token
            __lowerCamelCase : Any = 0 # simply set to zero
        __lowerCamelCase : int = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,SCREAMING_SNAKE_CASE__)
        return back.reshape(SCREAMING_SNAKE_CASE__)
    # Quantise `z`: nearest codebook entry, commitment loss, straight-through
    # gradient estimator.
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # reshape z -> (batch, height, width, channel) and flatten
        __lowerCamelCase : Any = z.permute(0 ,2 ,3 ,1).contiguous()
        __lowerCamelCase : str = z.view(-1 ,self.vq_embed_dim)
        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        __lowerCamelCase : Any = torch.argmin(torch.cdist(SCREAMING_SNAKE_CASE__ ,self.embedding.weight) ,dim=1)
        __lowerCamelCase : Dict = self.embedding(SCREAMING_SNAKE_CASE__).view(z.shape)
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : List[Any] = None
        # compute loss for embedding
        if not self.legacy:
            __lowerCamelCase : Optional[int] = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            __lowerCamelCase : str = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)
        # preserve gradients
        __lowerCamelCase : str = z + (z_q - z).detach()
        # reshape back to match original input shape
        __lowerCamelCase : Optional[int] = z_q.permute(0 ,3 ,1 ,2).contiguous()
        if self.remap is not None:
            __lowerCamelCase : List[Any] = min_encoding_indices.reshape(z.shape[0] ,-1) # add batch axis
            __lowerCamelCase : List[str] = self.remap_to_used(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : int = min_encoding_indices.reshape(-1 ,1) # flatten
        if self.sane_index_shape:
            __lowerCamelCase : Tuple = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3])
        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
    # Look up codebook vectors for the given indices (decode direction).
    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            __lowerCamelCase : str = indices.reshape(shape[0] ,-1) # add batch axis
            __lowerCamelCase : Dict = self.unmap_to_all(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : List[str] = indices.reshape(-1) # flatten again
        # get quantized latent vectors
        __lowerCamelCase : Tuple = self.embedding(SCREAMING_SNAKE_CASE__)
        if shape is not None:
            __lowerCamelCase : int = z_q.view(SCREAMING_SNAKE_CASE__)
            # reshape back to match original input shape
            __lowerCamelCase : int = z_q.permute(0 ,3 ,1 ,2).contiguous()
        return z_q
class A_ ( SCREAMING_SNAKE_CASE ):
    """Diagonal Gaussian over latents, parameterised by concatenated mean/log-variance.

    NOTE(review): names are machine-mangled — ``__init__`` assigns to
    ``__lowerCamelCase`` while the methods read ``self.mean`` /
    ``self.logvar`` / ``self.std`` / ``self.var`` / ``self.deterministic``,
    and the base class placeholder ``SCREAMING_SNAKE_CASE`` is undefined here.
    """
    def __init__( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[Any]=False):
        __lowerCamelCase : Optional[int] = parameters
        # Split the channel dimension into mean and log-variance halves.
        __lowerCamelCase , __lowerCamelCase : Tuple = torch.chunk(SCREAMING_SNAKE_CASE__ ,2 ,dim=1)
        # Clamp log-variance for numerical stability.
        __lowerCamelCase : Dict = torch.clamp(self.logvar ,-30.0 ,20.0)
        __lowerCamelCase : Any = deterministic
        __lowerCamelCase : List[Any] = torch.exp(0.5 * self.logvar)
        __lowerCamelCase : Union[str, Any] = torch.exp(self.logvar)
        if self.deterministic:
            # Zero std collapses sampling onto the mean.
            __lowerCamelCase : int = torch.zeros_like(
                self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype)
    # Draw a reparameterised sample: mean + std * eps.
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None):
        # make sure sample is on the same device as the parameters and has same dtype
        __lowerCamelCase : List[Any] = randn_tensor(
            self.mean.shape ,generator=SCREAMING_SNAKE_CASE__ ,device=self.parameters.device ,dtype=self.parameters.dtype)
        __lowerCamelCase : Tuple = self.mean + self.std * sample
        return x
    # KL divergence to a standard normal (other=None) or to another diagonal
    # Gaussian; zero when the distribution is deterministic.
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean ,2) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean ,2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar ,dim=[1, 2, 3] ,)
    # Negative log-likelihood of `sample` under this Gaussian, reduced over
    # the given dims.
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : int=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        __lowerCamelCase : Optional[Any] = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2) / self.var ,dim=SCREAMING_SNAKE_CASE__)
    # Distribution mode (= mean for a Gaussian).
    def lowerCAmelCase ( self : str):
        return self.mean
| 652 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 652 | 1 |
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class A_ :
        """Placeholder defined when vision support is unavailable.

        NOTE(review): presumably a stub standing in for ``PIL.Image`` (names
        are machine-mangled) — verify against the upstream test file.
        """
        @staticmethod
        def lowerCAmelCase ( *SCREAMING_SNAKE_CASE__ : List[str] ,**SCREAMING_SNAKE_CASE__ : List[str]):
            # No-op: accepts any arguments and does nothing.
            pass
@is_pipeline_test
@require_vision
class A_ ( unittest.TestCase ):
    """Tests for the zero-shot image-classification pipeline (PT and TF backends).

    NOTE(review): method names are mangled to ``lowerCAmelCase`` (later defs
    shadow earlier ones per decorator set); the ``@slow`` tests download real
    checkpoints and require network access.
    """
    # Tiny random CLIP checkpoint, PyTorch backend.
    @require_torch
    def lowerCAmelCase ( self : str):
        __lowerCamelCase : int = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,)
        __lowerCamelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        __lowerCamelCase : Tuple = image_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['a', 'b', 'c'])
        # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
        # python and torch versions.
        self.assertIn(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
                [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
            ] ,)
        # Batched call: 5 copies of the image, batch_size=2.
        __lowerCamelCase : Any = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2)
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
            ] ,)
    # Same tiny checkpoint, TensorFlow backend.
    @require_tf
    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : Any = pipeline(
            model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' ,framework='tf')
        __lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        __lowerCamelCase : Union[str, Any] = image_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['a', 'b', 'c'])
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] ,)
        __lowerCamelCase : Union[str, Any] = image_classifier([image] * 5 ,candidate_labels=['A', 'B', 'C'] ,batch_size=2)
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
                [
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                    {'score': 0.333, 'label': ANY(SCREAMING_SNAKE_CASE__)},
                ],
            ] ,)
    # Real CLIP checkpoint, PyTorch backend; expects fixed scores.
    @slow
    @require_torch
    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : Any = pipeline(
            task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,)
        # This is an image of 2 cats with remotes and no planes
        __lowerCamelCase : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        __lowerCamelCase : str = image_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] ,)
        __lowerCamelCase : Any = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2)
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 ,)
    # Real CLIP checkpoint, TensorFlow backend; expects the same fixed scores.
    @slow
    @require_tf
    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : str = pipeline(
            task='zero-shot-image-classification' ,model='openai/clip-vit-base-patch32' ,framework='tf')
        # This is an image of 2 cats with remotes and no planes
        __lowerCamelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        __lowerCamelCase : List[Any] = image_classifier(SCREAMING_SNAKE_CASE__ ,candidate_labels=['cat', 'plane', 'remote'])
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                {'score': 0.511, 'label': 'remote'},
                {'score': 0.485, 'label': 'cat'},
                {'score': 0.004, 'label': 'plane'},
            ] ,)
        __lowerCamelCase : Any = image_classifier([image] * 5 ,candidate_labels=['cat', 'plane', 'remote'] ,batch_size=2)
        self.assertEqual(
            nested_simplify(SCREAMING_SNAKE_CASE__) ,[
                [
                    {'score': 0.511, 'label': 'remote'},
                    {'score': 0.485, 'label': 'cat'},
                    {'score': 0.004, 'label': 'plane'},
                ],
            ]
            * 5 ,)
| 652 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): every module-level constant below is mangled to the single
# name `a`, so each assignment shadows the previous one — in the upstream
# file these are distinct names (logger, docstring checkpoints, expected
# output shape, archive list). Restore the original names before use.
a =logging.get_logger(__name__)
# General docstring
a ="""RegNetConfig"""
# Base docstring
a ="""facebook/regnet-y-040"""
a =[1, 1088, 7, 7]
# Image classification docstring
a ="""facebook/regnet-y-040"""
a ="""tabby, tabby cat"""
a =[
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class A_ ( tf.keras.layers.Layer ):
    """RegNet convolution block: ZeroPadding2D -> Conv2D -> BatchNorm -> activation.

    NOTE(review): assignments are mangled to ``__lowerCamelCase`` while
    ``call`` reads ``self.padding`` / ``self.convolution`` /
    ``self.normalization`` / ``self.activation``, and ``__init__``'s body
    reads parameter names (``kernel_size``, ``activation``) that are not its
    actual parameters — compare with the upstream ``TFRegNetConvLayer``.
    """
    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int = 3 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : int = 1 ,SCREAMING_SNAKE_CASE__ : Optional[str] = "relu" ,**SCREAMING_SNAKE_CASE__ : Optional[int] ,):
        super().__init__(**SCREAMING_SNAKE_CASE__)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        __lowerCamelCase : Optional[Any] = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2)
        __lowerCamelCase : Union[str, Any] = tf.keras.layers.ConvaD(
            filters=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,strides=SCREAMING_SNAKE_CASE__ ,padding='VALID' ,groups=SCREAMING_SNAKE_CASE__ ,use_bias=SCREAMING_SNAKE_CASE__ ,name='convolution' ,)
        __lowerCamelCase : int = tf.keras.layers.BatchNormalization(epsilon=1E-5 ,momentum=0.9 ,name='normalization')
        # Fall back to identity when no activation name is given.
        __lowerCamelCase : Union[str, Any] = ACTaFN[activation] if activation is not None else tf.identity
    # Apply pad -> conv -> norm -> activation.
    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str]):
        __lowerCamelCase : List[Any] = self.convolution(self.padding(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : Union[str, Any] = self.normalization(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = self.activation(SCREAMING_SNAKE_CASE__)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet "stem": a single strided conv layer applied to the raw pixel values."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        # Validate the channel axis eagerly (shape is static in eager mode only).
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """
    RegNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation block: global-pool, bottleneck MLP (1x1 convs), channel-wise rescale."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        # keepdims=True keeps the (1, 1) spatial dims so the gate broadcasts over H and W.
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """
    RegNet's layer composed by three `3x3` convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # No activation on the last conv: it is applied after the residual addition.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """
    RegNet's Y layer: an X layer with a Squeeze-and-Excitation block inserted before the final projection.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # SE gate with a 4x channel reduction in the bottleneck.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # No activation on the last conv: it is applied after the residual addition.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """
    A RegNet stage composed of stacked layers; the first layer downsamples the input with `stride=2`.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages; optionally collects the hidden state after every stage."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embedder + encoder + pooler; the shared backbone used by the public model heads."""

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Serving signature: NCHW pixel values at the canonical 224x224 resolution.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
# Docstring fragments consumed by the `add_start_docstrings*` decorators below.
REGNET_START_DOCSTRING = r"""
 Parameters:
 This model is a Tensorflow
 [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
 regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
 behavior.
 config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
 Initializing with a config file does not load the weights associated with the model, only the
 configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
 Args:
 pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
 Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
 [`ConveNextImageProcessor.__call__`] for details.
 output_hidden_states (`bool`, *optional*):
 Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
 more detail.
 return_dict (`bool`, *optional*):
 Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare RegNet model outputting raw features without any specific head on top.',
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Bare RegNet backbone returning last hidden state and pooled output."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training=False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    '''
 RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
 ImageNet.
 ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone with a linear classification head on the pooled features."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training=False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 652 | 1 |
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
# Backwards-compatibility shim: re-export the public pipeline entry points from
# their new home so legacy `diffusers.pipeline_utils` imports keep working.
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate

# Emit a deprecation warning once at import time, pointing callers at the new path.
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a =object()
# For specifying empty leaf dict `{}`
a =object()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ):
__lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )]
if matches and all(lowerCamelCase__ ):
return True
return False
def _replacement_rules(rules):
    """Build a `(key, val) -> replacement` function from `(rule, replacement)` pairs.

    The returned function yields the replacement of the first rule matching the
    key (see `_match`), or the original value when no rule matches.
    """

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Return the GPT-J parameter-name -> PartitionSpec rules for model parallelism.

    `None` entries (and `None` inside a spec) mean the corresponding parameter
    (or axis) is replicated rather than sharded across the `mp` mesh axis.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Map every leaf of a (nested) param dict to its PartitionSpec and freeze it.

    Raises AssertionError if any parameter name is not covered by the rules.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every leaf at the `_unmatched` sentinel so missing rules are detectable.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 1 |
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class MobileBERTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Test suite for the slow (Python) and fast (Rust) MobileBERT tokenizers."""

    tokenizer_class = MobileBertTokenizer
    rust_tokenizer_class = MobileBertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    pre_trained_model_path = "google/mobilebert-uncased"

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        self.tokenizers_list = [
            (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2])  # else the 'google/' prefix is stripped
            for tokenizer_def in self.tokenizers_list
        ]

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # Accents are stripped by default when lower-casing.
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])

        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])

        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("google/mobilebert-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # [CLS] = 101, [SEP] = 102 for this vocabulary.
        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 652 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes below ``n`` using an odd-only sieve of Eratosthenes.

    Args:
        n: exclusive upper bound (assumed >= 3, so indices 0..2 exist).

    Returns:
        The primes ``p < n`` in increasing order.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Cross out multiples of each odd candidate; evens > 2 are skipped
    # entirely when collecting results below.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(limit: int = 9_9_9_9_6_6_6_6_3_3_3_3) -> int:
    """Project Euler 234: sum all semidivisible numbers not exceeding ``limit``.

    A number ``n`` is semidivisible when exactly one of lps(n) (largest prime
    <= sqrt(n)) and ups(n) (smallest prime >= sqrt(n)) divides ``n``.  For
    every consecutive prime pair (p, q) the numbers in (p^2, q^2) have
    lps = p and ups = q, so we sum the multiples of p and of q in that range
    and subtract the multiples of p*q twice (they were added once by each
    side but qualify for neither).
    """
    # Sieve slightly past sqrt(limit) so the pair straddling sqrt(limit) exists.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_0_0
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Add numbers in (lower_bound, upper_bound) divisible by lps (= last_prime).
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Clamp upper_bound down to the limit in steps of next_prime.
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add numbers in (lower_bound, upper_bound) divisible by ups (= next_prime),
        # walking downwards from the clamped bound.
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove numbers divisible by BOTH primes: added once by each loop
        # above, but they are not semidivisible, so subtract twice.
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next consecutive prime pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Print the summed result for the default limit when run as a script.
    print(solution())
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Submodule name -> public symbols it exports; consumed lazily by _LazyModule.
_import_structure = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]}

if TYPE_CHECKING:
    from .tokenization_bertweet import BertweetTokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only imported
    # on first attribute access (the `import sys` above exists for this).
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of `ScoreSdeVeScheduler.step_pred` (field names match the
    keyword construction used by the scheduler)."""

    # Sample at the previous timestep (x_{t-1}); feed back as next model input.
    prev_sample: torch.FloatTensor
    # Mean of prev_sample before the diffusion noise was re-added.
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """
    Variance-exploding (VE) SDE scheduler with predictor-corrector sampling
    (Song et al., "Score-Based Generative Modeling through Stochastic
    Differential Equations").
    """

    # First-order solver: one model evaluation per predictor step.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2_0_0_0,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1E-5,
        correct_steps: int = 1,
    ):
        """
        Args:
            num_train_timesteps: number of diffusion steps used to train the model.
            snr: signal-to-noise ratio weighting the corrector step size.
            sigma_min: smallest noise level of the sigma schedule.
            sigma_max: largest noise level of the sigma schedule.
            sampling_eps: smallest continuous timestep (end of sampling).
            correct_steps: number of corrector steps per predictor step.
        """
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """The SDE-VE formulation needs no input scaling; return ``sample`` unchanged."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Create the continuous timestep grid running from 1 down to ``sampling_eps``."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Set the geometric noise schedule (continuous and discretized sigmas)."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        # Log-spaced sigmas indexed by the integer timestep.
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        """Sigma of the previous discrete step (zero at the very first step)."""
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate ``sample`` with the reverse-time SDE.

        Returns an `SdeVeOutput`, or ``(prev_sample, prev_sample_mean)`` when
        ``return_dict=False``.
        """
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Corrector step: nudge ``sample`` along the score via Langevin dynamics."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse ``original_samples`` to the noise levels selected by ``timesteps``."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 652 | 1 |
def SCREAMING_SNAKE_CASE__(equation1, equation2) -> tuple[float, float]:
    """Solve a 2x2 linear system with Cramer's rule.

    Each equation is ``[a, b, c]`` representing ``a*x + b*y = c``.

    Returns:
        ``(x, y)`` solving both equations.

    Raises:
        ValueError: if an equation does not have exactly 3 coefficients, if
            every ``a``/``b`` coefficient is zero, or if the system has no
            unique solution (inconsistent or infinitely many solutions).
    """
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.')
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.')

    # Extract the coefficients
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2

    # Calculate the determinants of the matrices (Cramer's rule)
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1

    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)')
        else:
            raise ValueError('No solution. (Inconsistent system)')
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (both right-hand sides are zero)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
| 652 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Average within-class scatter matrix.

    Args:
        features: data matrix of shape (n_features, n_samples).
        labels: per-sample class index of shape (n_samples,).
        classes: number of distinct classes (labels assumed in 0..classes-1).

    Returns:
        The within-class covariance, normalized by the total sample count.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i (mean as a column vector).
        centered_data = data - data_mean.reshape((data_mean.size, 1))
        if i > 0:
            # Accumulate the scatter of subsequent classes.
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # First class initializes the accumulator (it starts as np.nan).
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter matrix, weighted by class sizes.

    Args:
        features: data matrix of shape (n_features, n_samples).
        labels: per-sample class index of shape (n_samples,).
        classes: number of distinct classes (labels assumed in 0..classes-1).

    Returns:
        The between-class covariance, normalized by the total sample count.
    """
    general_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        column_mean = data.mean(1)
        # Deviation of the class mean from the overall mean, as a column vector.
        mean_diff = column_mean.reshape((column_mean.size, 1)) - general_mean.reshape((general_mean.size, 1))
        if i > 0:
            covariance_sum += device_data * np.dot(mean_diff, mean_diff.T)
        else:
            # First class initializes the accumulator (it starts as np.nan).
            covariance_sum = device_data * np.dot(mean_diff, mean_diff.T)

    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project ``features`` onto their top ``dimensions`` principal components.

    Args:
        features: data matrix of shape (n_features, n_samples).
        dimensions: number of principal components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if the dataset is empty (all zeros).
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)
        # eigh returns ascending eigenvalues: reverse columns, then keep the first
        # `dimensions` eigenvectors (largest variance first).
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project ``features`` onto ``dimensions`` discriminant directions (LDA).

    Args:
        features: data matrix of shape (n_features, n_samples).
        labels: per-sample class index of shape (n_samples,).
        classes: number of distinct classes; must exceed ``dimensions``.
        dimensions: number of discriminant components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if ``dimensions >= classes`` or the dataset is empty.
    """
    assert classes > dimensions

    # Check if features have been already loaded
    # (was `features.any` — a bound method, always truthy — now actually called,
    #  matching principal_component_analysis above)
    if features.any():
        # Generalized eigenproblem: between-class vs. within-class scatter.
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        # Orthonormalize the kept directions via SVD before projecting.
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions >= classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA on a degenerate dataset must end in an AssertionError."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest

    doctest.testmod()
| 652 | 1 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
a =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    """Zero-shot audio classification: score an audio clip against free-form
    candidate labels using an audio-text model (logits_per_audio)."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework != "pt":
            raise ValueError(F"The {self.__class__} is only available in PyTorch.")
        # No specific FOR_XXX available yet

    def __call__(self, audios: Union[np.ndarray, bytes, str], **kwargs):
        """Classify the given audio(s); accepts a URL/path string, raw bytes,
        or a mono waveform ndarray. ``candidate_labels`` and
        ``hypothesis_template`` may be passed as keyword arguments."""
        return super().__call__(audios, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route the user-facing kwargs to preprocess(); forward/postprocess take none.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is a sound of {}."):
        if isinstance(audio, str):
            if audio.startswith('http://') or audio.startswith('https://'):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio).content
            else:
                with open(audio, 'rb') as f:
                    audio = f.read()

        if isinstance(audio, bytes):
            audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate)

        if not isinstance(audio, np.ndarray):
            raise ValueError('We expect a numpy ndarray as input')
        if len(audio.shape) != 1:
            raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')

        inputs = self.feature_extractor(
            [audio], sampling_rate=self.feature_extractor.sampling_rate, return_tensors='pt')
        inputs["candidate_labels"] = candidate_labels
        # One hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_audio,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]

        if self.framework == "pt":
            probs = logits.softmax(dim=0)
            scores = probs.tolist()
        else:
            raise ValueError('`tf` framework not supported.')

        # Highest-probability label first.
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    """Run the doctests embedded in transformers source files and docs."""

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every matching file in ``directory``.

        Args:
            directory: directory to scan.
            identifier: keep only files containing this substring.
            ignore_files: file names to skip (``__init__.py`` is always skipped).
            n_identifier: substring(s); files containing any are excluded.
            only_modules: when True, import the file as a transformers module
                and run its DocTestSuite; otherwise run doctest.testfile.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, List):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 652 | 1 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
a =logging.get_logger(__name__)
class A_ :
_UpperCAmelCase : str
_UpperCAmelCase : str = None
@staticmethod
def lowerCAmelCase ( ):
raise NotImplementedError
def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : Optional[Any]):
raise NotImplementedError
def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
raise NotImplementedError
def lowerCAmelCase ( self : Union[str, Any]):
if not self.is_available():
raise RuntimeError(
F"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.")
@classmethod
def lowerCAmelCase ( cls : Optional[int]):
return F"`pip install {cls.pip_package or cls.name}`"
class OptunaBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by Optuna."""

    name = '''optuna'''

    @staticmethod
    def is_available():
        return is_optuna_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_optuna(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_optuna(trial)
class RayTuneBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by Ray Tune."""

    name = '''ray'''
    # The installable extra differs from the backend name.
    pip_package = '''\'ray[tune]\''''

    @staticmethod
    def is_available():
        return is_ray_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_ray(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_ray(trial)
class SigOptBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by SigOpt."""

    name = '''sigopt'''

    @staticmethod
    def is_available():
        return is_sigopt_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_sigopt(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_sigopt(trial)
class WandbBackend(HyperParamSearchBackendBase):
    """Hyperparameter search backed by Weights & Biases sweeps."""

    name = '''wandb'''

    @staticmethod
    def is_available():
        return is_wandb_available()

    def run(self, trainer, n_trials: int, direction: str, **kwargs):
        return run_hp_search_wandb(trainer, n_trials, direction, **kwargs)

    def default_hp_space(self, trial):
        return default_hp_space_wandb(trial)
# Registry of every known backend, keyed by its HPSearchBackend enum member;
# consumed below when choosing the default backend.
ALL_HYPERPARAMETER_SEARCH_BACKENDS = {
    HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def SCREAMING_SNAKE_CASE__() -> str:
    """Return the name of the first installed hyperparameter-search backend.

    Raises:
        RuntimeError: with per-backend install instructions when none of the
            registered backends is available.
    """
    available_backends = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(available_backends) > 0:
        name = available_backends[0].name
        if len(available_backends) > 1:
            logger.info(
                F"{len(available_backends)} hyperparameter search backends available. Using {name} as the default.")
        return name
    raise RuntimeError(
        'No hyperparameter search backend available.\n'
        + '\n'.join(
            F" - To install {backend.name} run {backend.pip_install()}"
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()))
| 652 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Marker sentencepiece uses for word-initial pieces.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

# Maximum input length (in tokens) per pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1024}
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
_UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
__lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
__lowerCamelCase : int = vocab_file
__lowerCamelCase : Tuple = monolingual_vocab_file
__lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(SCREAMING_SNAKE_CASE__))
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
__lowerCamelCase : Optional[int] = {}
__lowerCamelCase : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
__lowerCamelCase : Any = cnt
cnt += 1
with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f:
for line in f.readlines():
__lowerCamelCase : Any = line.strip().split()[0]
__lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids)
if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
__lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids)
__lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : int):
__lowerCamelCase : Tuple = self.__dict__.copy()
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs'):
__lowerCamelCase : str = {}
__lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase : Tuple = [self.cls_token_id]
__lowerCamelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
if token_ids_a is None:
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
__lowerCamelCase : Dict = [self.sep_token_id]
__lowerCamelCase : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def lowerCAmelCase ( self : List[str]):
    # Vocabulary size as seen by the model: the number of fairseq id -> token entries.
    return len(self.fairseq_ids_to_tokens)
def lowerCAmelCase(self):
    """Return the full vocabulary (token -> id), including added tokens.

    Fixes the mangled body: the dict was bound to a throwaway name and built from
    an undefined variable, so `vocab.update(...)` raised NameError.
    """
    vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
    vocab.update(self.added_tokens_encoder)
    return vocab
def lowerCAmelCase(self, text: str):
    """Tokenize `text` into subword pieces with SentencePiece.

    Fixes the mangled call that passed the input text itself as `out_type`.
    """
    return self.sp_model.encode(text, out_type=str)
def lowerCAmelCase(self, token):
    """Convert a token (str) to an id via the fairseq vocab; unknowns map to `unk_token_id`."""
    if token in self.fairseq_tokens_to_ids:
        return self.fairseq_tokens_to_ids[token]
    else:
        return self.unk_token_id
def lowerCAmelCase(self, index):
    """Convert an id (int) back to its token string via the fairseq vocab."""
    return self.fairseq_ids_to_tokens[index]
def lowerCAmelCase(self, tokens):
    """Join SentencePiece tokens and restore spaces from the '▁' word-boundary marker.

    Fixes the mangled body which passed the token list itself as the `replace`
    pattern; '▁' is the standard SPIECE_UNDERLINE marker.
    """
    out_string = "".join(tokens).replace("▁", " ").strip()
    return out_string
def lowerCAmelCase(self, save_directory: str, filename_prefix: Optional[str] = None):
    """Save the SentencePiece model and the monolingual vocab file into `save_directory`.

    Copies the original files when they exist on disk; otherwise re-serializes
    from memory. Returns the tuple of written file paths (or None on error).
    Fixes the mangled body whose target paths were never bound.
    """
    if not os.path.isdir(save_directory):
        logger.error(f"Vocabulary path ({save_directory}) should be a directory")
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
    )
    out_monolingual_vocab_file = os.path.join(
        save_directory,
        (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"],
    )
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    elif not os.path.isfile(self.vocab_file):
        # No file on disk: write the in-memory serialized SentencePiece model.
        with open(out_vocab_file, "wb") as fi:
            content_spiece_model = self.sp_model.serialized_model_proto()
            fi.write(content_spiece_model)
    if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
        out_monolingual_vocab_file
    ) and os.path.isfile(self.monolingual_vocab_file):
        copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
    elif not os.path.isfile(self.monolingual_vocab_file):
        # Re-create the monolingual vocab from the fairseq map, skipping specials.
        with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in self.fairseq_tokens_to_ids:
                if token not in self.all_special_tokens:
                    fp.write(f"{str(token)} \n")
    return out_vocab_file, out_monolingual_vocab_file
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: submodule name -> exported names.
# BUG FIX: the mangled version bound everything to one name `a`, so
# `_import_structure` was undefined and the lazy module was never installed.
_import_structure = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_sew"] = [
        "SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SEWForCTC",
        "SEWForSequenceClassification",
        "SEWModel",
        "SEWPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_sew import (
            SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
            SEWForCTC,
            SEWForSequenceClassification,
            SEWModel,
            SEWPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_:
    """Builds a tiny DeiT configuration plus random inputs and provides
    create-and-check helpers for the TF DeiT tests.

    Fixes the mangled original: `__init__` bound every argument to a local
    instead of an attribute, and all methods shared one name (shadowing).
    """

    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
                 is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
                 scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) built from random tensors."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build the small DeiTConfig used by every check below."""
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


# Backward-compatible alias: the test class below constructs `TFDeiTModelTester(self)`.
TFDeiTModelTester = A_
@require_tf
class A_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-API test-suite for the TF DeiT models.

    Fixes the mangled original: all class attributes shared one name (shadowing),
    all methods shared one name, and results were bound to locals that the
    following lines never read.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # Features the common tests should skip for DeiT.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Drop labels for model classes whose `call` does not accept them.
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO sample image used by the integration test below.

    Fixes the mangled original, which bound the image to a throwaway name and
    returned an undefined `image`; the caller refers to this as `prepare_img`.
    """
    return Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
@require_tf
@require_vision
class A_(unittest.TestCase):
    """Slow integration test running a pretrained TF DeiT classifier on a real image."""

    @cached_property
    def default_image_processor(self):
        # Only build the processor when vision dependencies are installed.
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 652 | 1 |
def combination_util(arr, n, r, index, data, i):
    """Recursively print all combinations of size `r` from arr[0:n].

    `data` holds the current partial combination, `index` is the next slot of
    `data` to fill, and `i` is the next candidate position in `arr`.
    Fixes the mangled original: duplicate parameter names (a SyntaxError) and a
    def name that did not match the recursive call sites.
    """
    if index == r:
        # A full combination is ready: print it.
        for j in range(r):
            print(data[j], end=' ')
        print(' ')
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    """Print all combinations of size `r` from arr[0:n], one per line.

    Fixes the mangled original: duplicate parameter names and an unbound
    temporary array; callers refer to this function as `print_combination`.
    """
    # A temporary array to store one combination at a time
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)
if __name__ == "__main__":
    # Driver code to check the function above
    # BUG FIX: the sample list was bound to `a` but used as `arr` (NameError).
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
| 652 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_(SCREAMING_SNAKE_CASE):
    """Output of the semantic Stable Diffusion pipeline.

    BUG FIX: the two fields shared one mangled name, so the second annotation
    silently replaced the first and the dataclass ended up with a single field.
    Field names restored to the upstream diffusers convention.
    """

    # Generated images, as a list of PIL images or a numpy array.
    # NOTE(review): exact array layout is not visible here — confirm against the pipeline.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Optional per-image flag from the safety checker; None when the checker is disabled.
    nsfw_content_detected: Optional[List[bool]]
# The pipeline implementation needs both transformers and torch at import time.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
import random
def rabin_miller(num: int) -> bool:
    """Miller-Rabin probabilistic primality test with 5 random witnesses.

    Expects an odd `num` > 3 (callers pre-filter small/even numbers); returns
    True when `num` is (probably) prime. Fixes the mangled original whose
    working variables were never bound (NameError); the name is restored to
    match the call site in `is_prime_low_num`.
    """
    # Factor num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            # Square repeatedly; a prime must hit num - 1 before t - 1 squarings.
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
# All primes below 1000, computed by trial division.
# Replaces the original 168-entry hard-coded table with an equivalent value.
_LOW_PRIMES = [p for p in range(2, 1000) if all(p % q != 0 for q in range(2, int(p**0.5) + 1))]


def is_prime_low_num(num: int) -> bool:
    """Return True if `num` is prime.

    Fast path: membership / trial division against the primes below 1000;
    falls back to Miller-Rabin for larger candidates. Fixes the mangled
    original whose parameter was never used (`num` was undefined); the name
    is restored to match the call sites below.
    """
    if num < 2:
        # 0, 1 and negatives are not prime.
        return False
    low_primes = _LOW_PRIMES
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    return rabin_miller(num)
def generate_large_prime(keysize: int = 1024) -> int:
    """Return a random prime with `keysize` bits.

    Fixes the mangled original: the candidate was bound to a throwaway name and
    the parameter was unused; name restored to match the driver code below.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num
if __name__ == "__main__":
    # Demo: generate one large prime and sanity-check it.
    # BUG FIX: the result was bound to `a` but printed as `num` (NameError).
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 652 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

# Emit a deprecation warning at import time pointing users to the new import path.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 652 | 1 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below `n` (expects n >= 3) via an odd-only sieve.

    Fixes the mangled original whose sieve array and result list were never
    bound (NameError); name restored to match the call in `solution`.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # Mark odd composites; even numbers (other than 2) are skipped entirely below.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 9_9_9_9_6_6_6_6_3_3_3_3 ) -> int:
    """Sum numbers up to the limit that are divisible by exactly one of each
    consecutive-prime pair (Project-Euler-style).

    NOTE(review): the body reads names (`limit`, `primes`, `prime_index`,
    `last_prime`, `next_prime`, `lower_bound`, `upper_bound`, `current`,
    `matches_sum`) that are never bound — every assignment targets the mangled
    `__lowerCamelCase` — so this function raises NameError as written. The
    original variable mapping is ambiguous from this view; restore it from the
    upstream source rather than guessing.
    """
    __lowerCamelCase : Optional[Any] = math.floor(math.sqrt(lowerCamelCase__ ) ) + 1_0_0
    __lowerCamelCase : Dict = prime_sieve(lowerCamelCase__ )
    __lowerCamelCase : Tuple = 0
    __lowerCamelCase : Dict = 0
    __lowerCamelCase : Any = primes[prime_index]
    # Walk consecutive prime pairs (last_prime, next_prime) while last_prime² <= limit.
    while (last_prime**2) <= limit:
        __lowerCamelCase : Any = primes[prime_index + 1]
        __lowerCamelCase : Optional[Any] = last_prime**2
        __lowerCamelCase : Dict = next_prime**2
        # Get numbers divisible by lps(current)
        __lowerCamelCase : Tuple = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        __lowerCamelCase : Any = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        __lowerCamelCase : List[Any] = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        __lowerCamelCase : Dict = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the entry point
    # above was renamed to SCREAMING_SNAKE_CASE__ by the mangling — so this
    # call raises NameError as written.
    print(solution())
| 652 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): the four module constants below were all mangled to the single
# name `a`, so each assignment overwrites the previous one. Originally these
# were `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — names the tokenizer class below
# still reads.
a =logging.get_logger(__name__)
a ={"""vocab_file""": """vocab.txt"""}
a ={
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
a ={
    """openbmb/cpm-ant-10b""": 1024,
}
def load_vocab(vocab_file):
    """Read a one-token-per-line vocab file into an OrderedDict token -> index.

    Fixes the mangled original whose dict and token list were never bound
    (NameError); name restored to match the call in the tokenizer __init__.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, 'r', encoding='utf-8') as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        # Strip only the trailing newline; other whitespace may be significant.
        token = token.rstrip('\n')
        vocab[token] = index
    return vocab
class A_(SCREAMING_SNAKE_CASE):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocab.

    Fixes the mangled original: `__init__` bound its arguments to locals instead
    of attributes, and the tokenize method's working variables were never bound.
    The method name is restored to `tokenize` to match its call site.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocab pieces; unknown spans become `unk_token`."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # Overlong inputs are mapped to a single unknown token.
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Try the longest substring first, shrinking until a vocab hit.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class A_ ( SCREAMING_SNAKE_CASE ):
    """CPM-Ant tokenizer: jieba coarse segmentation followed by greedy wordpiece.

    NOTE(review): this block is name-mangled throughout — results are bound to
    `__lowerCamelCase` while following lines read the intended names
    (`self.encoder`, `self.decoder`, `vocab_file`, ...), every method shares
    the name `lowerCAmelCase` (later defs shadow earlier ones), and several
    signatures repeat the parameter name `SCREAMING_SNAKE_CASE__` (a
    SyntaxError). Comments below describe the *intended* behaviour; restore
    identifiers from the upstream source before use.
    """

    # Class-level config: originally vocab_files_names, pretrained_vocab_files_map,
    # max_model_input_sizes, model_input_names and add_prefix_space — all mangled
    # to one shadowed name here.
    _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    _UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : str = ['''input_ids''', '''attention_mask''']
    _UpperCAmelCase : Optional[int] = False

    # Intended: build the encoder from the vocab file, remap the special
    # space/line tokens to " " and "\n", then derive the decoder and the
    # wordpiece tokenizer.
    def __init__( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Tuple="<d>" ,SCREAMING_SNAKE_CASE__ : Tuple="</d>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]="</s>" ,SCREAMING_SNAKE_CASE__ : str="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<unk>" ,SCREAMING_SNAKE_CASE__ : List[Any]="</n>" ,SCREAMING_SNAKE_CASE__ : int="</_>" ,SCREAMING_SNAKE_CASE__ : List[Any]="left" ,**SCREAMING_SNAKE_CASE__ : List[str] ,):
        requires_backends(self ,['jieba'])
        super().__init__(
            bod_token=SCREAMING_SNAKE_CASE__ ,eod_token=SCREAMING_SNAKE_CASE__ ,bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,line_token=SCREAMING_SNAKE_CASE__ ,space_token=SCREAMING_SNAKE_CASE__ ,padding_side=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
        # NOTE(review): `bod_token`/`eod_token`/`space_token`/`line_token` below
        # are unbound — the parameters were renamed by the mangling.
        __lowerCamelCase : Optional[Any] = bod_token
        __lowerCamelCase : Dict = eod_token
        __lowerCamelCase : Any = load_vocab(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = self.encoder[space_token]
        __lowerCamelCase : Dict = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        # NOTE(review): the sort key lambda reads `x[1]` but its parameter is
        # named SCREAMING_SNAKE_CASE__ — unbound name.
        __lowerCamelCase : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
        __lowerCamelCase : int = {v: k for k, v in self.encoder.items()}
        __lowerCamelCase : Union[str, Any] = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token)

    # Intended: id of the begin-of-document token.
    @property
    def lowerCAmelCase ( self : List[Any]):
        return self.encoder[self.bod_token]

    # Intended: id of the end-of-document token.
    @property
    def lowerCAmelCase ( self : Tuple):
        return self.encoder[self.eod_token]

    # Intended: id of the newline token.
    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        return self.encoder["\n"]

    # Intended: vocab_size property.
    @property
    def lowerCAmelCase ( self : str):
        return len(self.encoder)

    # Intended: get_vocab — encoder plus added tokens.
    def lowerCAmelCase ( self : str):
        return dict(self.encoder ,**self.added_tokens_encoder)

    # Intended: _tokenize — jieba segmentation, then wordpiece on each segment.
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        # NOTE(review): `output_tokens` is unbound (mangled assignment above it).
        __lowerCamelCase : Any = []
        for x in jieba.cut(SCREAMING_SNAKE_CASE__ ,cut_all=SCREAMING_SNAKE_CASE__):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(SCREAMING_SNAKE_CASE__))
        return output_tokens

    # Intended: _decode — strip negative ids and pad/bos/eos before decoding.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,**SCREAMING_SNAKE_CASE__ : List[Any]):
        # NOTE(review): `token_ids` is unbound — the parameter was renamed.
        __lowerCamelCase : Tuple = [i for i in token_ids if i >= 0]
        __lowerCamelCase : str = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)

    # Intended: membership check of a token in the vocab.
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # NOTE(review): `token` is unbound — the parameter was renamed.
        return token in self.encoder

    # Intended: convert_tokens_to_string — plain concatenation.
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
        return "".join(SCREAMING_SNAKE_CASE__)

    # Intended: _convert_token_to_id with unk fallback.
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        return self.encoder.get(SCREAMING_SNAKE_CASE__ ,self.encoder.get(self.unk_token))

    # Intended: _convert_id_to_token with unk fallback.
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        return self.decoder.get(SCREAMING_SNAKE_CASE__ ,self.unk_token)

    # Intended: save_vocabulary — restore the raw space/newline entries to their
    # special tokens and write the vocab one token per line.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
        if os.path.isdir(SCREAMING_SNAKE_CASE__):
            __lowerCamelCase : Any = os.path.join(
                SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            __lowerCamelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
        __lowerCamelCase : Any = 0
        if " " in self.encoder:
            __lowerCamelCase : Any = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            __lowerCamelCase : str = self.encoder['\n']
            del self.encoder["\n"]
        __lowerCamelCase : str = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda SCREAMING_SNAKE_CASE__: x[1]))
        # NOTE(review): `vocab_file`, `index` and `token_index` below are unbound.
        with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    __lowerCamelCase : Any = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)

    # Intended: build_inputs_with_special_tokens — prefix each sequence with BOS.
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : List[int] = None):
        # NOTE(review): duplicate parameter names make this def a SyntaxError;
        # `token_ids_a` is also unbound.
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    # Intended: get_special_tokens_mask — 1 for BOS positions, 0 elsewhere.
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
        if token_ids_a is not None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE__))
| 652 | 1 |
class A_:  # Public class to implement a graph
    """Count connected islands of 1-cells in a grid, using 8-connectivity DFS.

    Fixes the mangled original: the constructor bound its arguments to locals
    instead of attributes, and the DFS/counting methods read names that were
    never bound.
    """

    def __init__(self, row: int, col: int, graph: list[list[bool]]):
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        """True when (i, j) is inside the grid, unvisited, and a land cell."""
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        """DFS flood-fill from (i, j), checking all 8 surrounding cells."""
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # row offsets, coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]  # column offsets
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        """Return the number of 8-connected islands of 1-cells in the grid."""
        visited = [[False for _ in range(self.COL)] for _ in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
| 652 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

# Lazy import structure: submodule name -> exported names.
# BUG FIX: the mangled version bound everything to one name `a`, so
# `_import_structure` was undefined and the lazy module was never installed.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Lazy import structure: submodule name -> exported names.
# BUG FIX: the mangled version bound everything to one name `a`, so
# `_import_structure` was undefined and the lazy module was never installed.
_import_structure = {
    "configuration_roc_bert": ["ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoCBertConfig"],
    "tokenization_roc_bert": ["RoCBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # RoCBert ships no fast tokenizer; nothing additional to register.
    pass

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_roc_bert"] = [
        "ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RoCBertForCausalLM",
        "RoCBertForMaskedLM",
        "RoCBertForMultipleChoice",
        "RoCBertForPreTraining",
        "RoCBertForQuestionAnswering",
        "RoCBertForSequenceClassification",
        "RoCBertForTokenClassification",
        "RoCBertLayer",
        "RoCBertModel",
        "RoCBertPreTrainedModel",
        "load_tf_weights_in_roc_bert",
    ]

if TYPE_CHECKING:
    from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
    from .tokenization_roc_bert import RoCBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUG FIX: this branch previously raised OptionalDependencyNotAvailable
        # when tokenizers *was* available, aborting type-checking imports.
        pass

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roc_bert import (
            ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RoCBertForCausalLM,
            RoCBertForMaskedLM,
            RoCBertForMultipleChoice,
            RoCBertForPreTraining,
            RoCBertForQuestionAnswering,
            RoCBertForSequenceClassification,
            RoCBertForTokenClassification,
            RoCBertLayer,
            RoCBertModel,
            RoCBertPreTrainedModel,
            load_tf_weights_in_roc_bert,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_(SchedulerCommonTest):
    """Unit tests for ``UnCLIPScheduler`` built on the shared scheduler harness.

    NOTE(review): all methods were previously named ``lowerCAmelCase`` so later
    definitions shadowed earlier ones; unique names restored from each body's
    evident purpose.  The helper/attribute names (``scheduler_classes``,
    ``get_scheduler_config``, ``dummy_model``, ...) are grounded by the
    ``self.*`` references inside the original bodies.
    """

    # The harness instantiates each class in this tuple.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # The previous step must strictly precede the current one.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # The last step has no successor to hand to the scheduler.
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIP; intentionally a no-op override of the harness test.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIP; intentionally a no-op override of the harness test.
        pass
| 652 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: submodule name -> public names it provides.
# NOTE(review): previously both the dict and the modeling list were bound to
# ``a``, so the ``_import_structure`` passed to ``_LazyModule`` was undefined.
_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Torch is available: expose the modeling objects through the lazy module.
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attribute access triggers the real import.
    a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
    sys.modules[__name__] = a
| 652 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A_(PretrainedConfig):
    """Configuration for a Swin2SR image super-resolution model.

    Every constructor argument is stored as an attribute of the same name;
    ``num_layers`` is derived as ``len(depths)``.  The base class was
    previously the undefined name ``SCREAMING_SNAKE_CASE`` — restored to
    ``PretrainedConfig`` (imported at the top of this module), and the
    collapsed ``__lowerCamelCase = x`` locals restored to ``self.x = x``.
    """

    model_type = "swin2sr"

    # Map generic config attribute names onto this model's native names.
    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],  # list default kept for interface compatibility; treated as read-only
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)  # one transformer stage per entry in `depths`
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 652 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_(SchedulerCommonTest):
    """Unit tests for ``UnCLIPScheduler`` built on the shared scheduler harness.

    NOTE(review): all methods were previously named ``lowerCAmelCase`` so later
    definitions shadowed earlier ones; unique names restored from each body's
    evident purpose.  The helper/attribute names (``scheduler_classes``,
    ``get_scheduler_config``, ``dummy_model``, ...) are grounded by the
    ``self.*`` references inside the original bodies.
    """

    # The harness instantiates each class in this tuple.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return a default scheduler config, overridable via ``kwargs``."""
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # The previous step must strictly precede the current one.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # The last step has no successor to hand to the scheduler.
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        # Not applicable to UnCLIP; intentionally a no-op override of the harness test.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIP; intentionally a no-op override of the harness test.
        pass
| 652 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    """Tokenize a single text line into fixed-length model inputs.

    Args:
        tokenizer: tokenizer to call on the line (its ``padding_side`` is set in place).
        line: raw text to encode.
        max_length: truncation/padding length.
        padding_side: ``"left"`` or ``"right"``; assigned to the tokenizer before encoding.
        pad_to_max_length: pad to ``max_length`` when True, no padding otherwise.
        return_tensors: framework of the returned tensors (default PyTorch).

    Returns:
        The tokenizer's encoding dict for ``[line]``.

    NOTE(review): function name restored from the call sites in the dataset's
    ``__getitem__``; the original signature had duplicate parameter names.
    """
    # BART tokenizers need add_prefix_space for lines that don't start with a space.
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are entirely padding from a batch of token ids.

    Args:
        input_ids: 2-D tensor of token ids (batch, seq_len).
        pad_token_id: id that counts as padding.
        attention_mask: optional mask trimmed with the same columns.

    Returns:
        The trimmed ``input_ids``, or ``(input_ids, attention_mask)`` when a
        mask is supplied.

    NOTE(review): name restored from the call sites in ``collate_fn``; the
    original signature had duplicate parameter names.
    """
    # Keep a column iff at least one row holds a non-pad token there.
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_(Dataset):
    """Line-by-line seq2seq dataset backed by ``<type_path>.source``/``.target`` files.

    NOTE(review): the base class was the undefined ``SCREAMING_SNAKE_CASE``
    (restored to the ``Dataset`` imported at the top of this module), the
    ``__init__`` signature had duplicate parameter names, and every instance
    attribute had collapsed into a throwaway local; names restored from the
    ``self.*`` reads inside the methods.
    """

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            # Optionally restrict the dataset to the first n_obs examples.
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # RAG wraps two tokenizers; pick the right sub-tokenizer for each side.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        # Pad source and target to the right
        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Character length of every line in ``data_file`` (newline included)."""
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        """Stack a list of examples and trim shared padding columns."""
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
a =getLogger(__name__)
def flatten_list(list_of_lists):
    """Flatten one level of nesting: ``[[1, 2], [3]] -> [1, 2, 3]``.

    NOTE(review): renamed from the duplicated placeholder ``SCREAMING_SNAKE_CASE__``
    (every function in this module shared that name, so later defs shadowed it).
    """
    return list(itertools.chain.from_iterable(list_of_lists))
def save_git_info(folder_path) -> None:
    """Write the current repo's git metadata to ``<folder_path>/git_log.json``.

    Name grounded by the helpers it calls (``get_git_info``/``save_json``),
    which other code in this module defines.
    """
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))
def save_json(content, path, indent=4, **json_dump_kwargs):
    """Serialize ``content`` as JSON to ``path``.

    Name grounded by the call in ``save_git_info``. Extra keyword arguments
    are forwarded to ``json.dump``.
    """
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    """Deserialize and return the JSON content of the file at ``path``."""
    with open(path) as f:
        return json.load(f)
def get_git_info():
    """Return a dict describing the enclosing git repo (id, sha, branch, host).

    Name grounded by the call in ``save_git_info``. Requires the ``git``
    package (imported at module top) and must run inside a git checkout.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f, x):
    """``map`` that eagerly returns a list (list-map)."""
    return list(map(f, x))
def pickle_save(obj, path):
    """Pickle ``obj`` to the file at ``path`` (binary mode)."""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s) -> str:
    """Normalize QA answer text: lowercase, strip punctuation/articles/extra whitespace.

    This is the standard SQuAD-style normalization; the name is grounded by
    the calls in the F1 / exact-match scorers below.
    """

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    """Token-level F1 between a predicted and a gold answer (SQuAD-style).

    Both strings are normalized first; returns 0 when no tokens overlap.
    """
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    """True iff the two answers are identical after normalization.

    Name grounded by the call inside the corpus-level EM aggregator below.
    """
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns, reference_lns):
    """Corpus-level exact-match rate between outputs and references.

    Returns ``{"em": fraction_of_exact_matches}``; lists must be same length.
    """
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model(model_prefix):
    """True when the model-type prefix denotes a RAG model (starts with ``"rag"``)."""
    return model_prefix.startswith("rag")
def set_extra_model_params(extra_params, hparams, config):
    """Move selected hyper-parameters from ``hparams`` onto ``config``.

    For each name in ``extra_params`` that is set (truthy) on ``hparams``:
    copy it to the matching attribute of ``config`` (using an equivalent
    name where the architectures differ, e.g. T5's ``dropout_rate``) and
    delete it from ``hparams``.  Unknown params are logged and dropped.

    Returns:
        The mutated ``(hparams, config)`` pair.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            param = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, param, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 652 | 1 |
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id, filename, revision=None) -> str:
    """Build the Hub URL of a dataset file, url-quoting the path for old hfh releases.

    NOTE(review): the original signature declared three parameters with the
    same placeholder name (a SyntaxError); names restored from usage.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        filename = quote(filename)
    return hfh.hf_hub_url(repo_id, filename, repo_type="dataset", revision=revision)
| 652 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a =logging.get_logger(__name__)
# General docstring
a ="""MobileNetV1Config"""
# Base docstring
a ="""google/mobilenet_v1_1.0_224"""
a =[1, 1024, 7, 7]
# Image classification docstring
a ="""google/mobilenet_v1_1.0_224"""
a ="""tabby, tabby cat"""
a =[
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str:
    """Build a mapping from TensorFlow checkpoint variable names to PyTorch parameters.

    NOTE(review): this function is badly mangled and cannot run as written:
    the signature declares three parameters with the same name (SyntaxError),
    ``model``/``backbone`` are read but never bound, every intended
    ``tf_to_pt_map[key] = param`` assignment has collapsed into a write to a
    throwaway local (so the original TF key strings are lost), and the
    returned ``tf_to_pt_map`` is undefined.  Restoring it needs the original
    key layout — flagged for reconstruction from upstream, not guessed here.
    """
    # Intended: start with an empty name -> parameter map.
    __lowerCamelCase : str = {}
    # Presumably selects the backbone when given a classification head wrapper — TODO confirm.
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : int = model.mobilenet_va
    else:
        __lowerCamelCase : List[str] = model
    # Stem convolution + its batch-norm statistics.
    __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/'
    __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight
    __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias
    __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight
    __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean
    __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var
    # 13 depthwise/pointwise pairs: TF block index is i+1, PyTorch layers are 2i and 2i+1.
    for i in range(1_3 ):
        __lowerCamelCase : Any = i + 1
        __lowerCamelCase : Union[str, Any] = i * 2
        __lowerCamelCase : Optional[Any] = backbone.layer[pt_index]
        __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        __lowerCamelCase : Tuple = pointer.convolution.weight
        __lowerCamelCase : Optional[Any] = pointer.normalization.bias
        __lowerCamelCase : Union[str, Any] = pointer.normalization.weight
        __lowerCamelCase : List[str] = pointer.normalization.running_mean
        __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var
        __lowerCamelCase : int = backbone.layer[pt_index + 1]
        __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        __lowerCamelCase : Optional[Any] = pointer.convolution.weight
        __lowerCamelCase : Any = pointer.normalization.bias
        __lowerCamelCase : str = pointer.normalization.weight
        __lowerCamelCase : Dict = pointer.normalization.running_mean
        __lowerCamelCase : List[str] = pointer.normalization.running_var
    # Classifier head weights for the classification wrapper.
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        __lowerCamelCase : Any = model.classifier.weight
        __lowerCamelCase : int = model.classifier.bias
    # NOTE(review): undefined name — the map above was never stored under this name.
    return tf_to_pt_map
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    """Load TensorFlow checkpoint weights into a PyTorch MobileNetV1 model.

    NOTE(review): mangled and non-runnable as written: the signature has three
    identically-named parameters (SyntaxError), and the intended assignments
    ``array = tf.train.load_variable(...)``, ``tf_weights[name] = array``,
    ``array = np.transpose(...)`` and ``pointer.data = torch.from_numpy(array)``
    all collapsed into writes to a throwaway local, so ``init_vars``,
    ``tf_weights`` and ``array`` are read but never bound.  Flagged for
    reconstruction rather than guessed here.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ )
    __lowerCamelCase : List[str] = {}
    # Intended: populate a name -> ndarray dict from the checkpoint.
    for name, shape in init_vars:
        logger.info(F"Loading TF weight {name} with shape {shape}" )
        __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
        __lowerCamelCase : List[Any] = array
    # Build TF to PyTorch weights loading map
    __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"Importing {name}" )
        if name not in tf_weights:
            logger.info(F"{name} not in tf pre-trained weights, skipping" )
            continue
        __lowerCamelCase : Optional[int] = tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            # TF depthwise kernels are HWIO; PyTorch expects a (2, 3, 0, 1) transpose.
            __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2: # copying into linear layer
                __lowerCamelCase : Any = array.squeeze().transpose()
            else:
                __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
        # Intended: copy the ndarray into the PyTorch parameter's data.
        __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
        # Drop the consumed entry plus its optimizer/EMA shadow variables.
        tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ )
        tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ )
    logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def apply_tf_padding(features, conv_layer) -> torch.Tensor:
    """Apply TensorFlow-style "SAME" padding to ``features`` for ``conv_layer``.

    Computes the asymmetric padding TF would use so the convolution output
    covers the full input, then zero-pads the tensor.

    Args:
        features: input of shape (..., height, width).
        conv_layer: the ``nn.Conv2d`` whose ``stride``/``kernel_size`` define the padding.

    NOTE(review): the original signature declared two parameters with the same
    placeholder name (SyntaxError); variable names restored from their reads.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # TF puts the extra pixel (odd totals) on the right/bottom side.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, "constant", 0.0)
class A_(nn.Module):
    """Conv -> optional BatchNorm -> optional activation block for MobileNetV1.

    NOTE(review): restored from mangled code — the ``__init__`` signature had
    duplicate parameter names, attributes were never stored on ``self``, and
    ``nn.Convad``/``nn.BatchNormad`` are garbles of ``nn.Conv2d``/``nn.BatchNorm2d``.
    Parameter names are grounded by their reads in the original body.
    """

    def __init__(
        self,
        config,
        in_channels,
        out_channels,
        kernel_size,
        stride=1,
        groups=1,
        bias=False,
        use_normalization=True,
        use_activation=True,
    ):
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF-style "SAME" padding, padding is applied dynamically in forward().
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode="zeros",
        )

        if use_normalization:
            # affine/track_running_stats flags: restored as True — TODO confirm against upstream.
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # use_activation may be a bool (use config.hidden_act) or an activation name.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features):
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class A_(PreTrainedModel):
    """Base class wiring MobileNetV1 into the pretrained-model machinery.

    NOTE(review): the class attribute names were garbled (``_UpperCAmelCase``);
    restored to the names ``PreTrainedModel`` reads by convention — confirm
    against upstream. The attribute *values* are unchanged from the original.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = "mobilenet_v1"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module):
        """Initialize weights: normal init for conv/linear, unit scale / zero bias for BN.

        ``nn.Convad``/``nn.BatchNormad`` were garbles of the real layer classes.
        """
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
a =r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    """Backbone: stem conv + 13 depthwise-separable stages + optional global pooling."""

    # NOTE(review): systematic renaming damage in this class — values are assigned to the
    # local `__lowerCamelCase` but read back through other names (`config`, `depth`,
    # `out_channels`, `strides`, `hidden_states`, ...), and the `__init__` header repeats
    # the parameter name `SCREAMING_SNAKE_CASE__` (a SyntaxError). The comments describe
    # the apparent intent; restore original names before running.
    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True):
        super().__init__(SCREAMING_SNAKE_CASE__)
        # Stem: 3x3 stride-2 conv from config.num_channels to 32 * depth_multiplier channels.
        __lowerCamelCase : List[str] = config
        __lowerCamelCase : Optional[int] = 3_2
        __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth)
        __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer(
            SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,)
        # Per-stage strides of the 13 depthwise-separable blocks.
        __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        __lowerCamelCase : str = nn.ModuleList()
        for i in range(1_3):
            __lowerCamelCase : str = out_channels
            # Channel count doubles whenever the resolution halves (and once up front).
            if strides[i] == 2 or i == 0:
                depth *= 2
                __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth)
            # Each stage is a depthwise 3x3 conv followed by a pointwise 1x1 conv.
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,))
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,))
        # Optional global average-pooling head, controlled by the second ctor argument.
        __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        # Head pruning is not supported by this architecture.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        """Forward pass: stem conv, the 13 stages, then optional pooled output."""
        __lowerCamelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__)
        # Collect per-stage activations only when hidden states were requested.
        __lowerCamelCase : Optional[Any] = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)
            if output_hidden_states:
                __lowerCamelCase : Any = all_hidden_states + (hidden_states,)
        __lowerCamelCase : Optional[Any] = hidden_states
        if self.pooler is not None:
            # Pool to (batch, channels) for downstream heads.
            __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1)
        else:
            __lowerCamelCase : List[str] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__ ,)
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    """Backbone plus dropout + linear classification head, with standard HF loss selection."""

    # NOTE(review): same renaming damage as the base model — locals are assigned as
    # `__lowerCamelCase` but read back as `outputs`, `logits`, `loss`, `loss_fct`, ...,
    # and the forward header repeats the parameter name `SCREAMING_SNAKE_CASE__`
    # (a SyntaxError). Restore original names before running.
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = config.num_labels
        __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__)
        # Classifier input width = channel count of the backbone's last pointwise conv.
        __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        """Forward pass: backbone -> dropout -> linear head; compute loss when labels given."""
        __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : List[str] = None
        if labels is not None:
            # Infer the problem type from label dtype/num_labels when not set explicitly.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowerCamelCase : Dict = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowerCamelCase : int = 'single_label_classification'
                else:
                    __lowerCamelCase : Tuple = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __lowerCamelCase : Tuple = MSELoss()
                if self.num_labels == 1:
                    __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze())
                else:
                    __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            elif self.config.problem_type == "single_label_classification":
                __lowerCamelCase : List[str] = CrossEntropyLoss()
                __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __lowerCamelCase : int = BCEWithLogitsLoss()
                __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        if not return_dict:
            __lowerCamelCase : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
| 652 | 1 |
import datasets
from .evaluate import evaluate
a ="""\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
a ="""
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
a ="""
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """Official CUAD metric: exact match, F1, AUPR and precision at 80%/90% recall."""

    def lowerCAmelCase ( self : Any):
        """Declare the metric's citation, description and input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': {
                        'id': datasets.Value('string'),
                        'prediction_text': datasets.features.Sequence(datasets.Value('string')),
                    },
                    'references': {
                        'id': datasets.Value('string'),
                        'answers': datasets.features.Sequence(
                            {
                                'text': datasets.Value('string'),
                                'answer_start': datasets.Value('int32'),
                            }),
                    },
                }) ,codebase_urls=['https://www.atticusprojectai.org/cuad'] ,reference_urls=['https://www.atticusprojectai.org/cuad'] ,)

    def lowerCAmelCase ( self : Optional[int] ,predictions ,references):
        """Convert inputs to the official CUAD format and run the reference scorer.

        Fix: the original header repeated the parameter name `SCREAMING_SNAKE_CASE__`
        (a SyntaxError) while the body read `predictions` / `references`; the
        parameters are restored to the names the body actually uses.
        """
        # Map each example id to its list of candidate answer texts.
        pred_dict = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
        # Re-wrap the references in the SQuAD-style nesting the scoring script expects.
        dataset = [
            {
                'paragraphs': [
                    {
                        'qas': [
                            {
                                'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
                                'id': ref['id'],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset ,predictions=pred_dict)
        return score
| 652 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( img , pta , ptb , rows , cols ) -> np.ndarray:
    """Warp ``img`` with the affine transform mapping the three points ``pta`` onto ``ptb``.

    Fix: the original header repeated the parameter name ``lowerCamelCase__`` five
    times (a SyntaxError) and the body read undefined names (``rows``/``cols``); the
    parameters now have distinct names matching what the body reads.

    :param img: source image (numpy array)
    :param pta: three source points, float32 array of shape (3, 2)
    :param ptb: three destination points, float32 array of shape (3, 2)
    :param rows: first component of the output size tuple passed to ``warpAffine``
    :param cols: second component of the output size tuple
    """
    matrix = cva.getAffineTransform(pta ,ptb)
    return cva.warpAffine(img ,matrix ,(rows, cols) )
if __name__ == "__main__":
    # NOTE(review): renaming damage throughout this demo — results are bound to `a`
    # (and `a , a` for the shape unpack) but read back as `gray_img`, `img_rows`,
    # `img_cols`, `ptsa`, `images`, `titles`, and the rotation helper is called as
    # `get_rotation` although it was renamed to SCREAMING_SNAKE_CASE__ above.
    # Restore the original names before running.
    # read original image
    a =cva.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # turn image in gray scale value
    a =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    a , a =gray_img.shape
    # set different points to rotate image
    a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    a =[
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    a =plt.figure(1)
    a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 652 | 1 |
import itertools
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
    """Return True iff ``lowerCamelCase__`` is a prime number (trial division over 6k±1).

    Fix: the body read an undefined name ``number``; it is now bound to the parameter.
    """
    number = lowerCamelCase__
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5 , int(math.sqrt(number) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
    """Yield the prime numbers 2, 3, 5, 7, ... indefinitely.

    Fix: the original body called an undefined name ``is_prime`` (the module's
    primality check was renamed); an equivalent private test is defined locally so
    the generator is self-contained.
    """
    def _is_prime(candidate: int) -> bool:
        # 6k±1 trial division, mirroring the standalone primality check above.
        if 1 < candidate < 4:
            return True
        if candidate < 2 or candidate % 2 == 0 or candidate % 3 == 0:
            return False
        for i in range(5 , int(math.sqrt(candidate) + 1 ) , 6 ):
            if candidate % i == 0 or candidate % (i + 2) == 0:
                return False
        return True

    num = 2
    while True:
        if _is_prime(num):
            yield num
        num += 1
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_0_1 ) -> int:
    """Return the ``lowerCamelCase__``-th prime (1-indexed); the default answers Project Euler #7.

    Fix: the body read the undefined names ``nth`` and ``prime_generator``; the
    parameter is bound to ``nth`` and an equivalent local prime generator is used.
    """
    nth = lowerCamelCase__

    def _primes():
        # Endless stream of primes via 6k±1 trial division.
        candidate = 2
        while True:
            is_prime = candidate >= 2
            if candidate >= 4:
                if candidate % 2 == 0 or candidate % 3 == 0:
                    is_prime = False
                else:
                    for i in range(5 , int(math.sqrt(candidate) + 1 ) , 6 ):
                        if candidate % i == 0 or candidate % (i + 2) == 0:
                            is_prime = False
                            break
            if is_prime:
                yield candidate
            candidate += 1

    return next(itertools.islice(_primes() , nth - 1 , nth ) )
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this module (the function above was
    # renamed to SCREAMING_SNAKE_CASE__), so this entry point raises NameError as written.
    print(F"""{solution() = }""")
| 652 |
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
    """Return the ``lowerCamelCase__``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Fixes: the original type check compared the argument against itself (so it could
    never raise), and the body read undefined names (``number``, ``proth_list``,
    ``proth_index``, ``increment``); the dataflow is restored with explicit locals.

    Raises:
        TypeError: if the argument is not an int.
        ValueError: if the argument is < 1.
    """
    number = lowerCamelCase__
    if not isinstance(number , int ):
        message = F"Input value of [number={number}] must be an integer"
        raise TypeError(message)
    if number < 1:
        message = F"Input value of [number={number}] must be > 0"
        raise ValueError(message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling "blocks" needed to cover the requested index.
        block_index = int(math.log(number // 3 , 2 ) ) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1 , block_index ):
            # Each block appends `increment` numbers of the form 2^(block+1) + previous.
            for _ in range(increment ):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1] )
                proth_index += 1
            increment *= 2
        return proth_list[number - 1]
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): renaming damage — `proth` is not defined (the function above is
    # named SCREAMING_SNAKE_CASE__), both loop results are bound to `a`, and the final
    # print reads an undefined `value`. Restore the original names before running.
    for number in range(11):
        a =0
        try:
            a =proth(number)
        except ValueError:
            print(F"""ValueError: there is no {number}th Proth number""")
            continue
        print(F"""The {number}th Proth number: {value}""")
| 652 | 1 |
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)
# Fall back to dummy placeholder objects when torch/transformers are missing so that
# importing this subpackage never hard-fails; otherwise expose the real pipelines.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # NOTE(review): only two of the public names are re-exported in the fallback branch,
    # so `KandinskyImgaImgPipeline` / `KandinskyInpaintPipeline` / `MultilingualCLIP`
    # stay undefined when the optional deps are missing — confirm this is intentional.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Fixture builder for DETA image-processor tests: holds sizing knobs and computes
    the resize dimensions the processor is expected to produce."""

    # NOTE(review): renaming damage — the `__init__` header repeats the parameter name
    # `SCREAMING_SNAKE_CASE__` (a SyntaxError), its body assigns to the local
    # `__lowerCamelCase` instead of `self.<attr>`, and `get_expected_values` reads
    # undefined names (`w`, `h`, `expected_values`, ...). Restore original names.
    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        __lowerCamelCase : str = parent
        __lowerCamelCase : Union[str, Any] = batch_size
        __lowerCamelCase : int = num_channels
        __lowerCamelCase : Dict = min_resolution
        __lowerCamelCase : Tuple = max_resolution
        __lowerCamelCase : Dict = do_resize
        __lowerCamelCase : List[Any] = size
        __lowerCamelCase : Tuple = do_normalize
        __lowerCamelCase : Any = image_mean
        __lowerCamelCase : List[str] = image_std
        __lowerCamelCase : List[Any] = do_rescale
        __lowerCamelCase : str = rescale_factor
        __lowerCamelCase : Tuple = do_pad
    def lowerCAmelCase ( self : Dict):
        # Kwargs dict used to instantiate the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False):
        # Compute the (height, width) a shortest-edge resize should produce; for a
        # batch, take the per-image maxima (the padded batch shape).
        if not batched:
            __lowerCamelCase : Optional[Any] = image_inputs[0]
            if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image):
                __lowerCamelCase , __lowerCamelCase : Any = image.size
            else:
                __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2]
            if w < h:
                __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w)
                __lowerCamelCase : Tuple = self.size['shortest_edge']
            elif w > h:
                __lowerCamelCase : Union[str, Any] = self.size['shortest_edge']
                __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h)
            else:
                __lowerCamelCase : List[Any] = self.size['shortest_edge']
                __lowerCamelCase : Optional[int] = self.size['shortest_edge']
        else:
            __lowerCamelCase : List[str] = []
            for image in image_inputs:
                __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0]
            __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for `DetaImageProcessor`: config round-trips, PIL/numpy/torch batching,
    and slow integration checks against COCO detection / panoptic annotations."""

    # NOTE(review): widespread renaming damage — results are assigned to the local
    # `__lowerCamelCase` (or the tuple `__lowerCamelCase , __lowerCamelCase`) but read
    # back under other names (`image_processor`, `encoded_images`, `encoding`, ...).
    # Restore the original variable names before trusting these tests.
    _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None
    def lowerCAmelCase ( self : Optional[Any]):
        # setUp: build the shared sizing fixture.
        __lowerCamelCase : List[str] = DetaImageProcessingTester(self)
    @property
    def lowerCAmelCase ( self : Any):
        # Convenience accessor for the tester's image-processor kwargs.
        return self.image_processor_tester.prepare_image_processor_dict()
    def lowerCAmelCase ( self : Dict):
        # The processor must expose all of its configuration attributes.
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))
    def lowerCAmelCase ( self : str):
        # from_dict must reproduce the configured size and padding flags.
        __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
        self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Any):
        # Placeholder inherited test hook (intentionally skipped).
        pass
    def lowerCAmelCase ( self : List[str]):
        # Non-batched and batched calls with PIL images yield the expected shapes.
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
        # Test not batched input
        __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def lowerCAmelCase ( self : str):
        # Same shape checks with numpy-array inputs.
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
        # Test not batched input
        __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    def lowerCAmelCase ( self : int):
        # Same shape checks with torch-tensor inputs.
        # Initialize image_processing
        __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
        # Test not batched input
        __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)
    @slow
    def lowerCAmelCase ( self : Optional[Any]):
        # Integration: encode a real COCO detection sample and compare against
        # reference pixel values, boxes, labels and sizes.
        # prepare image and target
        __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
            __lowerCamelCase : List[str] = json.loads(f.read())
        __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        __lowerCamelCase : Optional[int] = DetaImageProcessor()
        __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : int = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify orig_size
        __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
    @slow
    def lowerCAmelCase ( self : str):
        # Integration: same as above for the COCO panoptic format, including masks.
        # prepare image, target and masks_path
        __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
            __lowerCamelCase : Tuple = json.loads(f.read())
        __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
        __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : Tuple = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : int = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify masks
        __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
        # verify orig_size
        __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-level RNG shared by the float-fixture helper below when no generator is passed.
a =random.Random()
def SCREAMING_SNAKE_CASE__ ( shape , scale=1.0 , rng=None , name=None ) -> Any:
    """Create a ``shape[0] x shape[1]`` nested list of random floats in ``[0, scale)``.

    Fix: the original header repeated the parameter name ``lowerCamelCase__`` four
    times (a SyntaxError) while the body read ``rng``, ``shape`` and ``scale``; the
    parameters are restored to the names the body uses (``name`` stays accepted but
    unused, as before).
    """
    if rng is None:
        # Fall back to the module-level generator.
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class A_ ( unittest.TestCase ):
    """Fixture builder for Whisper feature-extractor tests: holds mel/stft knobs and
    prepares variable- or equal-length float speech inputs."""

    # NOTE(review): renaming damage — the `__init__` header repeats the parameter name
    # `SCREAMING_SNAKE_CASE__` (a SyntaxError) and its body assigns to the local
    # `__lowerCamelCase` instead of `self.<attr>`, so the property dict below would
    # fail. Restore original names before running.
    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Any=7 ,SCREAMING_SNAKE_CASE__ : Optional[int]=4_0_0 ,SCREAMING_SNAKE_CASE__ : Any=2_0_0_0 ,SCREAMING_SNAKE_CASE__ : List[str]=1_0 ,SCREAMING_SNAKE_CASE__ : int=1_6_0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=0.0 ,SCREAMING_SNAKE_CASE__ : Dict=4_0_0_0 ,SCREAMING_SNAKE_CASE__ : List[str]=False ,SCREAMING_SNAKE_CASE__ : str=True ,):
        __lowerCamelCase : str = parent
        __lowerCamelCase : int = batch_size
        __lowerCamelCase : Any = min_seq_length
        __lowerCamelCase : Tuple = max_seq_length
        # Step between consecutive example lengths so the batch spans min..max.
        __lowerCamelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        __lowerCamelCase : List[Any] = padding_value
        __lowerCamelCase : List[Any] = sampling_rate
        __lowerCamelCase : Tuple = return_attention_mask
        __lowerCamelCase : Optional[Any] = do_normalize
        __lowerCamelCase : List[str] = feature_size
        __lowerCamelCase : Any = chunk_length
        __lowerCamelCase : Optional[int] = hop_length
    def lowerCAmelCase ( self : Union[str, Any]):
        # Kwargs dict used to instantiate the feature extractor under test.
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int=False ,SCREAMING_SNAKE_CASE__ : str=False):
        # Build a batch of float speech inputs; lengths increase unless equal_length.
        def _flatten(SCREAMING_SNAKE_CASE__ : Any):
            return list(itertools.chain(*SCREAMING_SNAKE_CASE__))
        if equal_length:
            __lowerCamelCase : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            __lowerCamelCase : Tuple = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff)
            ]
        if numpify:
            __lowerCamelCase : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE__) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for `WhisperFeatureExtractor`: save/load round-trips via directory and
    JSON file must preserve the config and the mel filterbank."""

    # NOTE(review): renaming damage — locals are assigned to `__lowerCamelCase` but
    # read back as `feat_extract_first` / `feat_extract_second` / `dict_first` /
    # `mel_1`, etc. Restore the original variable names before trusting these tests.
    _UpperCAmelCase : Union[str, Any] = WhisperFeatureExtractor if is_speech_available() else None
    def lowerCAmelCase ( self : Dict):
        # setUp: build the shared fixture.
        __lowerCamelCase : int = WhisperFeatureExtractionTester(self)
    def lowerCAmelCase ( self : List[Any]):
        # save_pretrained -> from_pretrained must round-trip config and mel filters.
        __lowerCamelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : List[str] = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE__)[0]
            check_json_file_has_correct_format(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : int = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = feat_extract_first.to_dict()
        __lowerCamelCase : List[Any] = feat_extract_second.to_dict()
        __lowerCamelCase : List[str] = feat_extract_first.mel_filters
        __lowerCamelCase : int = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[Any]):
        # to_json_file -> from_json_file must round-trip config and mel filters.
        __lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            __lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE__ ,'feat_extract.json')
            feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Dict = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = feat_extract_first.to_dict()
        __lowerCamelCase : Dict = feat_extract_second.to_dict()
        __lowerCamelCase : str = feat_extract_first.mel_filters
        __lowerCamelCase : Dict = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[Any]):
# Tests that all call wrap to encode_plus and batch_encode_plus
__lowerCamelCase : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase : Union[str, Any] = [floats_list((1, x))[0] for x in range(8_0_0 ,1_4_0_0 ,2_0_0)]
__lowerCamelCase : Optional[Any] = [np.asarray(SCREAMING_SNAKE_CASE__) for speech_input in speech_inputs]
# Test feature size
__lowerCamelCase : Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ ,padding='max_length' ,return_tensors='np').input_features
self.assertTrue(input_features.ndim == 3)
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
# Test not batched input
__lowerCamelCase : Any = feature_extractor(speech_inputs[0] ,return_tensors='np').input_features
__lowerCamelCase : Any = feature_extractor(np_speech_inputs[0] ,return_tensors='np').input_features
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# Test batched
__lowerCamelCase : Optional[int] = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
__lowerCamelCase : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# Test 2-D numpy arrays are batched.
__lowerCamelCase : str = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__lowerCamelCase : Tuple = np.asarray(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
__lowerCamelCase : int = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
# Test truncation required
__lowerCamelCase : int = [floats_list((1, x))[0] for x in range(2_0_0 ,(feature_extractor.n_samples + 5_0_0) ,2_0_0)]
__lowerCamelCase : Tuple = [np.asarray(SCREAMING_SNAKE_CASE__) for speech_input in speech_inputs]
__lowerCamelCase : Any = [x[: feature_extractor.n_samples] for x in speech_inputs]
__lowerCamelCase : Dict = [np.asarray(SCREAMING_SNAKE_CASE__) for speech_input in speech_inputs_truncated]
__lowerCamelCase : str = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
__lowerCamelCase : List[str] = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='np').input_features
for enc_seq_a, enc_seq_a in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
def lowerCAmelCase ( self : Dict):
import torch
__lowerCamelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCamelCase : int = np.random.rand(1_0_0 ,3_2).astype(np.floataa)
__lowerCamelCase : List[str] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCamelCase : Tuple = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='np')
self.assertTrue(np_processed.input_features.dtype == np.floataa)
__lowerCamelCase : Any = feature_extractor.pad([{'input_features': inputs}] ,return_tensors='pt')
self.assertTrue(pt_processed.input_features.dtype == torch.floataa)
def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Dict = load_dataset('hf-internal-testing/librispeech_asr_dummy' ,'clean' ,split='validation')
# automatic decoding with librispeech
__lowerCamelCase : Any = ds.sort('id').select(range(SCREAMING_SNAKE_CASE__))[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def lowerCAmelCase ( self : Optional[Any]):
# fmt: off
__lowerCamelCase : List[Any] = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
])
# fmt: on
__lowerCamelCase : Any = self._load_datasamples(1)
__lowerCamelCase : int = WhisperFeatureExtractor()
__lowerCamelCase : Dict = feature_extractor(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').input_features
self.assertEqual(input_features.shape ,(1, 8_0, 3_0_0_0))
self.assertTrue(torch.allclose(input_features[0, 0, :3_0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
__lowerCamelCase : Optional[int] = self._load_datasamples(1)[0]
__lowerCamelCase : Dict = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_5_5_3_5 # Rescale to [0, 65535] to show issue
__lowerCamelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] ,attention_mask=SCREAMING_SNAKE_CASE__)[0]
self.assertTrue(np.all(np.mean(SCREAMING_SNAKE_CASE__) < 1E-3))
self.assertTrue(np.all(np.abs(np.var(SCREAMING_SNAKE_CASE__) - 1) < 1E-3))
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast (CPU, dummy-weights) tests for KandinskyVaaControlnetPipeline.

    Builds a tiny UNet + VQ movq + DDIM scheduler and checks that the pipeline
    produces a deterministic 64x64 image matching a reference slice.

    NOTE(review): machine-obfuscated -- locals are bound to ``__lowerCamelCase``
    while later lines use the de-obfuscated names (``unet``, ``movq``, ...).
    """

    # Pipeline class under test and its required/batch parameter names.
    _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
    _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    # Optional call kwargs the pipeline accepts (duplicates retained as-is).
    _UpperCAmelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase : Tuple = False
    @property
    def lowerCAmelCase ( self : Tuple):
        # text embedder hidden size
        return 3_2
    @property
    def lowerCAmelCase ( self : List[Any]):
        # time input dim
        return 3_2
    @property
    def lowerCAmelCase ( self : str):
        return self.time_input_dim
    @property
    def lowerCAmelCase ( self : List[str]):
        return self.time_input_dim * 4
    @property
    def lowerCAmelCase ( self : List[str]):
        # cross-attention dim
        return 1_0_0
    @property
    def lowerCAmelCase ( self : Dict):
        # Tiny UNet configured for image+hint conditioning.
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
        return model
    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        # Keyword arguments for the tiny VQ (movq) decoder.
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }
    @property
    def lowerCAmelCase ( self : Optional[Any]):
        torch.manual_seed(0)
        __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
        return model
    def lowerCAmelCase ( self : Optional[Any]):
        # Assemble the pipeline components (unet, scheduler, movq).
        __lowerCamelCase : Tuple = self.dummy_unet
        __lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
        # Deterministic dummy call inputs (embeddings + depth hint + generator).
        __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            SCREAMING_SNAKE_CASE__)
        # create hint
        __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            # MPS does not support device-specific generators.
            __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs
    def lowerCAmelCase ( self : Optional[Any]):
        # End-to-end fast check: dict output and tuple output must agree with
        # the stored reference slice.
        __lowerCamelCase : Dict = 'cpu'
        __lowerCamelCase : Tuple = self.get_dummy_components()
        __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = output.images
        __lowerCamelCase : Tuple = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __lowerCamelCase : List[str] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """GPU integration test: full Kandinsky 2.2 prior + controlnet-depth run
    compared against a stored reference image.

    NOTE(review): machine-obfuscated; the method below was presumably named
    ``tearDown`` upstream (it calls ``super().tearDown()``).
    """

    def lowerCAmelCase ( self : int):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def lowerCAmelCase ( self : int):
        # Load the reference output and the depth hint image.
        __lowerCamelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        __lowerCamelCase : Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # Normalise the hint to [0, 1] and add a batch dimension (NCHW).
        __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
        __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
        __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
        pipe_prior.to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
        __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = 'A robot, 4k photo'
        __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
        # Prior pipeline produces (image_embeds, negative_image_embeds).
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
            SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase : Any = pipeline(
            image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
        __lowerCamelCase : List[Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 652 | 1 |
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Encode a bytes-like object as an uppercase base16 (hex) string.

    Bug fix: the previous version applied ``hex()`` to the whole input instead
    of to each byte (``hex(lowerCamelCase__)``), which raised ``TypeError`` for
    any ``bytes`` argument.

    Args:
        lowerCamelCase__: the bytes-like data to encode.

    Returns:
        The uppercase hexadecimal representation, two digits per byte.
    """
    # hex(byte) -> e.g. '0x4a'; drop the '0x', left-pad to two digits, uppercase.
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(lowerCamelCase__ )] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bytes:
    """Decode an uppercase base16 (hex) string into bytes, per RFC 3548.

    Bug fix: the return line referenced an undefined name ``data`` (a garbling
    of the parameter), so every call on valid input raised ``NameError``; it
    now indexes the actual parameter.

    Args:
        lowerCamelCase__: the base16-encoded string.

    Returns:
        The decoded bytes.

    Raises:
        ValueError: if the input has an odd number of digits or contains
            characters outside the uppercase base16 alphabet.
    """
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(lowerCamelCase__ ) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(lowerCamelCase__ ) <= set('0123456789ABCDEF' ):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(lowerCamelCase__[i] + lowerCamelCase__[i + 1] , 1_6 ) for i in range(0 , len(lowerCamelCase__ ) , 2 ) )
if __name__ == "__main__":
    # When run as a script, execute any doctests embedded in this module.
    import doctest
    doctest.testmod()
| 652 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A_ :
    """Helper that builds tiny TF-XGLM configs and inputs for the tests below.

    NOTE(review): machine-obfuscated -- ``__init__`` binds locals instead of
    ``self.*`` attributes, and the tuple unpacking in the last method annotates
    a parenthesized target (invalid syntax upstream of garbling). Reconcile
    with the original transformers source before use.
    """

    # Config class, extra config kwargs, and default activation for the tests.
    _UpperCAmelCase : int = XGLMConfig
    _UpperCAmelCase : List[Any] = {}
    _UpperCAmelCase : Tuple = '''gelu'''
    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,):
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : str = seq_length
        __lowerCamelCase : Optional[Any] = is_training
        __lowerCamelCase : Any = use_input_mask
        __lowerCamelCase : str = use_labels
        __lowerCamelCase : Any = vocab_size
        __lowerCamelCase : Dict = d_model
        __lowerCamelCase : int = num_hidden_layers
        __lowerCamelCase : List[Any] = num_attention_heads
        __lowerCamelCase : List[str] = ffn_dim
        __lowerCamelCase : Optional[Any] = activation_function
        __lowerCamelCase : Tuple = activation_dropout
        __lowerCamelCase : Union[str, Any] = attention_dropout
        __lowerCamelCase : List[str] = max_position_embeddings
        __lowerCamelCase : List[Any] = initializer_range
        # Special token ids: scope=None, pad=0, bos=2, eos=1.
        __lowerCamelCase : Any = None
        __lowerCamelCase : List[str] = 0
        __lowerCamelCase : List[str] = 2
        __lowerCamelCase : Dict = 1
    def lowerCAmelCase ( self : Any):
        # Pretrained config used by the "large model" checks.
        return XGLMConfig.from_pretrained('facebook/xglm-564M')
    def lowerCAmelCase ( self : str):
        # Build (config, input_ids, input_mask, head_mask) with small vocab ids.
        __lowerCamelCase : Any = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3)
        __lowerCamelCase : Dict = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCamelCase : int = self.get_config()
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def lowerCAmelCase ( self : List[Any]):
        # Construct a tiny XGLMConfig from the tester attributes.
        return XGLMConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,)
    def lowerCAmelCase ( self : int):
        # Split prepare_config_and_inputs() into the dict form used by common tests.
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Any = config_and_inputs
        __lowerCamelCase : str = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common model/pipeline test harness for TF-XGLM (model classes, config
    tester, pretrained-loading smoke test)."""

    # Model classes under test and the pipeline mapping (empty without TF).
    _UpperCAmelCase : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    _UpperCAmelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
    _UpperCAmelCase : str = (
        {'''feature-extraction''': TFXGLMModel, '''text-generation''': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # Flags for the common-test mixin (head masking / onnx / resize disabled).
    _UpperCAmelCase : Tuple = False
    _UpperCAmelCase : Optional[int] = False
    _UpperCAmelCase : Union[str, Any] = False
    def lowerCAmelCase ( self : Tuple):
        # setUp: build the model tester and config tester.
        __lowerCamelCase : Tuple = TFXGLMModelTester(self)
        __lowerCamelCase : int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,n_embd=3_7)
    def lowerCAmelCase ( self : List[Any]):
        self.config_tester.run_common_tests()
    @slow
    def lowerCAmelCase ( self : str):
        # Smoke test: the first pretrained checkpoint loads successfully.
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Union[str, Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def lowerCAmelCase ( self : Union[str, Any]):
        super().test_resize_token_embeddings()
@require_tf
class A_ ( unittest.TestCase ):
    """Language-generation integration tests for TF-XGLM: greedy generation,
    sampled generation against a fixed seed, and batched generation with left
    padding.

    NOTE(review): machine-obfuscated; several kwargs pass the placeholder
    ``SCREAMING_SNAKE_CASE__`` where upstream passed booleans/inputs.
    """

    @slow
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True):
        # Greedy generation from "The dog" must reproduce the reference ids.
        __lowerCamelCase : Any = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = tf.convert_to_tensor([[2, 2_6_8, 9_8_6_5]] ,dtype=tf.intaa) # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        __lowerCamelCase : Optional[Any] = [2, 2_6_8, 9_8_6_5, 6_7, 1_1, 1_9_8_8, 5_7_2_5_2, 9_8_6_5, 5, 9_8_4, 6_7, 1_9_8_8, 2_1_3_8_3_8, 1_6_5_8, 5_3, 7_0_4_4_6, 3_3, 6_6_5_7, 2_7_8, 1_5_8_1]
        # fmt: on
        __lowerCamelCase : int = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() ,SCREAMING_SNAKE_CASE__)
    @slow
    def lowerCAmelCase ( self : List[str]):
        # Seeded sampling on CPU must produce a fixed sentence.
        __lowerCamelCase : Tuple = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : int = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tf.random.set_seed(0)
        __lowerCamelCase : Optional[Any] = tokenizer('Today is a nice day and' ,return_tensors='tf')
        __lowerCamelCase : List[Any] = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            __lowerCamelCase : Union[str, Any] = model.generate(SCREAMING_SNAKE_CASE__ ,do_sample=SCREAMING_SNAKE_CASE__ ,seed=[7, 0])
        __lowerCamelCase : List[str] = tokenizer.decode(output_ids[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    @slow
    def lowerCAmelCase ( self : Dict):
        # Left-padded batched generation must match per-sentence generation.
        __lowerCamelCase : Union[str, Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        __lowerCamelCase : Union[str, Any] = 'left'
        # use different length sentences to test batching
        __lowerCamelCase : List[str] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        __lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__ ,return_tensors='tf' ,padding=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = inputs['input_ids']
        __lowerCamelCase : Dict = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,attention_mask=inputs['attention_mask'] ,max_new_tokens=1_2)
        __lowerCamelCase : Tuple = tokenizer(sentences[0] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[str] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : Any = tokenizer(sentences[1] ,return_tensors='tf').input_ids
        __lowerCamelCase : List[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE__ ,max_new_tokens=1_2)
        __lowerCamelCase : int = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.decode(output_non_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tokenizer.decode(output_padded[0] ,skip_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,[non_padded_sentence, padded_sentence])
| 652 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Model output dataclass holding a single tensor field.

    NOTE(review): presumably the predicted embedding of a prior model
    (the base class is a garbled ``BaseOutput``) -- confirm upstream.
    """

    # The output tensor produced by the model's forward pass.
    _UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    @register_to_config
    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int = 3_2 ,SCREAMING_SNAKE_CASE__ : int = 6_4 ,SCREAMING_SNAKE_CASE__ : int = 2_0 ,SCREAMING_SNAKE_CASE__ : int = 7_6_8 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=7_7 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : float = 0.0 ,SCREAMING_SNAKE_CASE__ : str = "silu" ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = None ,SCREAMING_SNAKE_CASE__ : Optional[str] = "linear" ,SCREAMING_SNAKE_CASE__ : Optional[str] = "prd" ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,):
        """Build the prior-transformer submodules.

        Constructs: time projection/embedding, embedding and encoder-hidden
        projections (with optional layer norm), positional and optional "prd"
        embeddings, a stack of BasicTransformerBlock layers, the output norm +
        projection, a causal attention mask buffer, and two learned mean/std
        parameters for (un)normalising embeddings.

        NOTE(review): obfuscated -- each assignment binds ``__lowerCamelCase``
        where upstream assigned named ``self.*`` modules; parameter names in
        the signature are likewise garbled.
        """
        super().__init__()
        __lowerCamelCase : Dict = num_attention_heads
        __lowerCamelCase : Dict = attention_head_dim
        # Transformer width = heads * per-head dim.
        __lowerCamelCase : Optional[Any] = num_attention_heads * attention_head_dim
        __lowerCamelCase : Dict = additional_embeddings
        # Fallbacks: derived dims default to the transformer/embedding widths.
        __lowerCamelCase : List[str] = time_embed_dim or inner_dim
        __lowerCamelCase : int = embedding_proj_dim or embedding_dim
        __lowerCamelCase : Tuple = clip_embed_dim or embedding_dim
        __lowerCamelCase : Optional[int] = Timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,0)
        __lowerCamelCase : int = TimestepEmbedding(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,out_dim=SCREAMING_SNAKE_CASE__ ,act_fn=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        if embedding_proj_norm_type is None:
            __lowerCamelCase : Union[str, Any] = None
        elif embedding_proj_norm_type == "layer":
            __lowerCamelCase : Union[str, Any] = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
        else:
            raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")
        __lowerCamelCase : str = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        if encoder_hid_proj_type is None:
            __lowerCamelCase : Union[str, Any] = None
        elif encoder_hid_proj_type == "linear":
            __lowerCamelCase : Any = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        else:
            raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")
        # Learned positional embedding over tokens + the additional embeddings.
        __lowerCamelCase : Tuple = nn.Parameter(torch.zeros(1 ,num_embeddings + additional_embeddings ,SCREAMING_SNAKE_CASE__))
        if added_emb_type == "prd":
            __lowerCamelCase : List[str] = nn.Parameter(torch.zeros(1 ,1 ,SCREAMING_SNAKE_CASE__))
        elif added_emb_type is None:
            __lowerCamelCase : Tuple = None
        else:
            raise ValueError(
                F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`.")
        __lowerCamelCase : List[str] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,dropout=SCREAMING_SNAKE_CASE__ ,activation_fn='gelu' ,attention_bias=SCREAMING_SNAKE_CASE__ ,)
                for d in range(SCREAMING_SNAKE_CASE__)
            ])
        if norm_in_type == "layer":
            __lowerCamelCase : List[str] = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
        elif norm_in_type is None:
            __lowerCamelCase : Optional[int] = None
        else:
            raise ValueError(F"Unsupported norm_in_type: {norm_in_type}.")
        __lowerCamelCase : Tuple = nn.LayerNorm(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = nn.Linear(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        # Strictly-upper-triangular -10000 mask => causal attention.
        __lowerCamelCase : Optional[Any] = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] ,-10000.0)
        causal_attention_mask.triu_(1)
        __lowerCamelCase : int = causal_attention_mask[None, ...]
        self.register_buffer('causal_attention_mask' ,SCREAMING_SNAKE_CASE__ ,persistent=SCREAMING_SNAKE_CASE__)
        # Learned statistics used to (un)normalise the predicted embeddings.
        __lowerCamelCase : List[Any] = nn.Parameter(torch.zeros(1 ,SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : List[str] = nn.Parameter(torch.zeros(1 ,SCREAMING_SNAKE_CASE__))
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def lowerCAmelCase ( self : str):
        """Return a dict of all attention processors in the model, indexed by
        their module path (every submodule exposing ``set_processor``)."""
        __lowerCamelCase : Dict = {}
        def fn_recursive_add_processors(SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : torch.nn.Module ,SCREAMING_SNAKE_CASE__ : Dict[str, AttentionProcessor]):
            # Depth-first walk: record this module's processor (if any), then recurse.
            if hasattr(SCREAMING_SNAKE_CASE__ ,'set_processor'):
                __lowerCamelCase : int = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(F"{name}.{sub_name}" ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            return processors
        for name, module in self.named_children():
            fn_recursive_add_processors(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        return processors
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        """Set the attention processor(s) of the model.

        Accepts either a single processor (applied to every attention layer)
        or a dict keyed by module path; a dict must contain exactly one entry
        per attention layer.
        """
        __lowerCamelCase : Tuple = len(self.attn_processors.keys())
        # A dict of processors must cover every attention layer exactly once.
        if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) and len(SCREAMING_SNAKE_CASE__) != count:
            raise ValueError(
                F"A dict of processors was passed, but the number of processors {len(SCREAMING_SNAKE_CASE__)} does not match the"
                F" number of attention layers: {count}. Please make sure to pass {count} processor classes.")
        def fn_recursive_attn_processor(SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : torch.nn.Module ,SCREAMING_SNAKE_CASE__ : Tuple):
            # Depth-first walk mirroring attn_processors: set a shared processor,
            # or pop the per-path entry out of the dict.
            if hasattr(SCREAMING_SNAKE_CASE__ ,'set_processor'):
                if not isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                    module.set_processor(SCREAMING_SNAKE_CASE__)
                else:
                    module.set_processor(processor.pop(F"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(F"{name}.{sub_name}" ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        for name, module in self.named_children():
            fn_recursive_attn_processor(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Optional[int]):
        """Reset every attention layer to the default AttnProcessor.

        NOTE(review): calls ``self.set_attn_processor``, which under the
        obfuscation corresponds to the garbled method above -- confirm the
        de-obfuscated names match upstream.
        """
        self.set_attn_processor(AttnProcessor())
def lowerCAmelCase(self, hidden_states, timestep, proj_embedding, encoder_hidden_states=None, attention_mask=None, return_dict=True):
    """Denoising forward pass of the prior transformer.

    Args:
        hidden_states: noisy image embeddings, shape ``(batch, dim)`` or
            ``(batch, seq, dim)``.
        timestep: current denoising step — a python number, 0-d or 1-d tensor.
        proj_embedding: projected conditioning embedding.
        encoder_hidden_states: optional sequence of encoder hidden states.
        attention_mask: optional ``(batch, seq)`` mask; 1 = attend, 0 = masked.
        return_dict: return a `PriorTransformerOutput` instead of a tuple.

    Returns:
        `PriorTransformerOutput` (or a 1-tuple) holding the predicted image
        embedding.
    """
    batch_size = hidden_states.shape[0]
    timesteps = timestep
    if not torch.is_tensor(timesteps):
        timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
    elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
        timesteps = timesteps[None].to(hidden_states.device)
    # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
    timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)
    timesteps_projected = self.time_proj(timesteps)
    # timesteps does not contain any weights and will always return f32 tensors
    # but time_embedding might be fp16, so we need to cast here.
    timesteps_projected = timesteps_projected.to(dtype=self.dtype)
    time_embeddings = self.time_embedding(timesteps_projected)
    if self.embedding_proj_norm is not None:
        proj_embedding = self.embedding_proj_norm(proj_embedding)
    proj_embeddings = self.embedding_proj(proj_embedding)
    if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
        encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
    elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
        raise ValueError('`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set')
    hidden_states = self.proj_in(hidden_states)
    positional_embeddings = self.positional_embedding.to(hidden_states.dtype)
    additional_embeds = []
    additional_embeddings_len = 0
    if encoder_hidden_states is not None:
        additional_embeds.append(encoder_hidden_states)
        additional_embeddings_len += encoder_hidden_states.shape[1]
    # Promote 2-d inputs to length-1 sequences so everything can be concatenated.
    if len(proj_embeddings.shape) == 2:
        proj_embeddings = proj_embeddings[:, None, :]
    if len(hidden_states.shape) == 2:
        hidden_states = hidden_states[:, None, :]
    additional_embeds = additional_embeds + [
        proj_embeddings,
        time_embeddings[:, None, :],
        hidden_states,
    ]
    if self.prd_embedding is not None:
        prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
        additional_embeds.append(prd_embedding)
    hidden_states = torch.cat(additional_embeds, dim=1)
    # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
    additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
    if positional_embeddings.shape[1] < hidden_states.shape[1]:
        positional_embeddings = F.pad(
            positional_embeddings,
            (
                0,
                0,
                additional_embeddings_len,
                self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
            ),
            value=0.0,
        )
    hidden_states = hidden_states + positional_embeddings
    if attention_mask is not None:
        # Convert to an additive mask, pad for the extra tokens, overlay the
        # causal mask, then repeat once per attention head.
        attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
        attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
        attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
        attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)
    if self.norm_in is not None:
        hidden_states = self.norm_in(hidden_states)
    for block in self.transformer_blocks:
        hidden_states = block(hidden_states, attention_mask=attention_mask)
    hidden_states = self.norm_out(hidden_states)
    if self.prd_embedding is not None:
        # The PRD token (last position) carries the prediction.
        hidden_states = hidden_states[:, -1]
    else:
        hidden_states = hidden_states[:, additional_embeddings_len:]
    predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)
    if not return_dict:
        return (predicted_image_embedding,)
    return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)
def lowerCAmelCase(self, prior_latents):
    """Un-normalize prior latents back into CLIP embedding space.

    Inverse of the ``(x - mean) / std`` normalization applied to the CLIP
    statistics during training.
    """
    # Bug fix: the scaled value was computed but the raw input was returned;
    # also the body referenced `prior_latents` while the parameter had a
    # different (obfuscated) name.
    prior_latents = (prior_latents * self.clip_std) + self.clip_mean
    return prior_latents
| 652 |
# Conditional exports for the Kandinsky pipelines: fall back to dummy
# placeholder objects when `transformers` or `torch` is unavailable so that
# importing this package never hard-fails.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # NOTE(review): only two of the pipelines imported below get dummy
    # fallbacks here — `KandinskyImgaImgPipeline` and `KandinskyInpaintPipeline`
    # would still be undefined in the no-torch case; confirm this is intended.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 | 1 |
import math
import unittest
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
    """Return True iff the argument is a prime number.

    Trial division over candidates of the form 6k +/- 1 — sufficient because
    every prime greater than 3 has that form.

    Raises:
        AssertionError: if the argument is not a non-negative int (kept as an
            assert because the accompanying tests expect it to raise).
    """
    number = lowerCamelCase__
    # Bug fix: the original asserted `isinstance(x, x)` and compared an
    # undefined name `number`.
    assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1.
    # math.isqrt avoids the float-precision issues of int(math.sqrt(n) + 1).
    for i in range(5, math.isqrt(number) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
class A_ ( unittest.TestCase ):
    # NOTE(review): these tests call `is_prime`, but the only primality function
    # in this chunk is named `SCREAMING_SNAKE_CASE__` — as written each call
    # raises NameError; confirm the intended public name.
    # NOTE(review): the method names do not start with `test_`, so unittest
    # discovery will never execute them — presumably obfuscation damage.
    def lowerCAmelCase ( self : Union[str, Any]):
        """Known small primes are reported as prime."""
        self.assertTrue(is_prime(2))
        self.assertTrue(is_prime(3))
        self.assertTrue(is_prime(5))
        self.assertTrue(is_prime(7))
        self.assertTrue(is_prime(1_1))
        self.assertTrue(is_prime(1_3))
        self.assertTrue(is_prime(1_7))
        self.assertTrue(is_prime(1_9))
        self.assertTrue(is_prime(2_3))
        self.assertTrue(is_prime(2_9))
    def lowerCAmelCase ( self : Optional[Any]):
        """Non-primes and invalid inputs are rejected."""
        # NOTE(review): `assertRaises` expects an exception class;
        # `SCREAMING_SNAKE_CASE__` is a function here — upstream this was
        # `assertRaises(AssertionError)`; confirm.
        with self.assertRaises(SCREAMING_SNAKE_CASE__):
            is_prime(-1_9)
        self.assertFalse(
            is_prime(0) ,'Zero doesn\'t have any positive factors, primes must have exactly two.' ,)
        self.assertFalse(
            is_prime(1) ,'One only has 1 positive factor, primes must have exactly two.' ,)
        self.assertFalse(is_prime(2 * 2))
        self.assertFalse(is_prime(2 * 3))
        self.assertFalse(is_prime(3 * 3))
        self.assertFalse(is_prime(3 * 5))
        self.assertFalse(is_prime(3 * 5 * 7))
# Allow running this test module directly via `python <file>`.
if __name__ == "__main__":
    unittest.main()
| 652 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]

# The obfuscated source bound every constant above to the single name `a`,
# leaving `_CHECKPOINT_FOR_DOC` etc. (referenced by the decorators below)
# undefined. Keep `a`'s final surviving binding for backward compatibility.
a = TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv -> BatchNorm -> activation, with explicit SAME-style zero padding."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # ACTaFN is this module's (obfuscated) import of the activation registry.
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        # Chain conv -> norm -> activation; the obfuscated source discarded each
        # intermediate result and returned an undefined name.
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


A_ = TFRegNetConvLayer  # preserve the obfuscated module-level class binding
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet "stem": a single strided conv embedding of the input pixels."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        # Channel check only works eagerly; inside tf.function shapes may be unknown.
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.')
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state


A_ = TFRegNetEmbeddings  # preserve the obfuscated module-level class binding
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch-norm projecting the residual when the shape changes."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False) -> tf.Tensor:
        return self.normalization(self.convolution(inputs), training=training)


A_ = TFRegNetShortCut  # preserve the obfuscated module-level class binding
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-Excitation channel attention block."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        # ".0"/".2" layer names mirror the PyTorch nn.Sequential indices so
        # checkpoints load across frameworks.
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        # Re-scale the input channels by the learned attention weights.
        hidden_state = hidden_state * pooled
        return hidden_state


A_ = TFRegNetSELayer  # preserve the obfuscated module-level class binding
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet residual "X" layer: 1x1 -> grouped 3x3 -> 1x1 convolutions."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # Final conv is linear; the activation is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


A_ = TFRegNetXLayer  # preserve the obfuscated module-level class binding
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet residual "Y" layer: an X layer with Squeeze-and-Excitation."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # Final conv is linear; the activation is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


A_ = TFRegNetYLayer  # preserve the obfuscated module-level class binding
class TFRegNetStage(tf.keras.layers.Layer):
    """A RegNet stage: `depth` X or Y layers, downsampling in the first one."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state


A_ = TFRegNetStage  # preserve the obfuscated module-level class binding
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stack of RegNet stages, optionally collecting per-stage hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first
        # stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(
                TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Hidden states are recorded *before* each stage, plus once at the end,
        # so the tuple includes the stem output and every stage output.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


A_ = TFRegNetEncoder  # preserve the obfuscated module-level class binding
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; the shared backbone of all RegNet heads."""

    # Required by @keras_serializable for (de)serialization.
    config_class = RegNetConfig

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )


A_ = TFRegNetMainLayer  # preserve the obfuscated module-level class binding
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """Handles weight initialization and pretrained checkpoint loading for RegNet."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # Serving signature: NCHW float32 images at the canonical 224x224 size.
        # NOTE(review): property name `input_signature` inferred from the
        # TensorSpec return value — confirm against the transformers version
        # this file targets.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}


A_ = TFRegNetPreTrainedModel  # preserve the obfuscated module-level class binding
# Docstrings injected into the model classes by the add_start_docstrings
# decorators below; the obfuscated source bound both to `a`, leaving the
# decorator references undefined.
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

a = REGNET_INPUTS_DOCSTRING  # preserve the obfuscated module-level binding
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''',
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Headless RegNet backbone wrapping `TFRegNetMainLayer`."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(self, pixel_values, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )


A_ = TFRegNetModel  # preserve the obfuscated module-level class binding
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone with a linear image-classification head."""

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head (".1" mirrors the PyTorch nn.Sequential index).
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None, training=False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)


A_ = TFRegNetForImageClassification  # preserve the obfuscated module-level class binding
| 652 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
a =False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """Fast CPU tests for the Attend-and-Excite Stable Diffusion pipeline."""

    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Attend-and-Excite outputs are seed-sensitive; force determinism.
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        """Build a tiny, randomly initialized pipeline suitable for CPU tests."""
        torch.manual_seed(0)
        # UNetaDConditionModel is this module's (obfuscated) import of UNet2DConditionModel.
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'),
            up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith('mps'):
            # MPS does not support per-device generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)


A_ = StableDiffusionAttendAndExcitePipelineFastTests  # preserve the obfuscated binding
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineSlowTests(unittest.TestCase):
    """GPU integration test against a stored reference output image."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)
        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4', safety_checker=None, torch_dtype=torch.float16)
        pipe.to('cuda')
        prompt = 'a painting of an elephant with glasses'
        token_indices = [5, 7]
        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type='numpy',
        ).images[0]
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy')
        # Loose tolerance: fp16 sampling differs slightly across GPU generations.
        assert np.abs((expected_image - image).max()) < 5e-1


A_ = StableDiffusionAttendAndExcitePipelineSlowTests  # preserve the obfuscated binding
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a =object()
# For specifying empty leaf dict `{}`
a =object()
def _match(qs, ks):
    """Return True if regexes `qs` fully match any consecutive window of `ks`.

    Restored: the obfuscated version named both parameters `lowerCamelCase__`
    (a SyntaxError) and the call site below uses the name `_match`.
    """
    # Compile regexes and force complete match (anchor with '$').
    qts = tuple(re.compile(x + '$') for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False
def _replacement_rules(rules):
    """Return a `replace(key, val)` closure yielding the replacement of the
    first rule whose pattern matches `key`, or `val` if none matches."""

    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace
def _get_partition_rules():
    """Return (path-pattern, PartitionSpec) rules for a GPT-style flax model.

    Restored: the obfuscation replaced the literal `None` arguments of `P`
    with the undefined name `lowerCamelCase__`.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Build a frozen pytree of PartitionSpecs for every leaf of `in_dict`.

    Restored: the obfuscation renamed the locals away while the body still
    referenced `initd` and `result`, which were undefined.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Start every flattened key as unmatched, then resolve via the rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical config location for the published checkpoint.
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class A_ ( PretrainedConfig ):
    """Configuration for an NLLB-MoE (mixture-of-experts seq2seq) model.

    Defaults mirror the `facebook/nllb-moe-54b` checkpoint. Restored from the
    obfuscated original: the base class was an undefined placeholder, the
    three class attributes all shared one name (so the later assignments
    clobbered the earlier ones), and every `__init__` parameter had the same
    placeholder name — a SyntaxError. Parameter names are recovered from the
    body's attribute assignments, in positional order.
    """

    model_type = '''nllb-moe'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}

    def __init__(
        self,
        vocab_size=128112,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.05,
        decoder_layerdrop=0.05,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        router_bias=False,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        num_experts=128,
        expert_capacity=64,
        encoder_sparse_step=4,
        decoder_sparse_step=4,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        second_expert_policy="all",
        normalize_router_prob_before_dropping=False,
        batch_prioritized_routing=False,
        moe_eval_capacity_token_fraction=1.0,
        moe_token_dropout=0.2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        output_router_logits=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        # MoE / routing hyper-parameters.
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        self.decoder_sparse_step = decoder_sparse_step
        self.encoder_sparse_step = encoder_sparse_step
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.batch_prioritized_routing = batch_prioritized_routing
        self.second_expert_policy = second_expert_policy
        self.normalize_router_prob_before_dropping = normalize_router_prob_before_dropping
        self.moe_eval_capacity_token_fraction = moe_eval_capacity_token_fraction
        self.moe_token_dropout = moe_token_dropout
        self.output_router_logits = output_router_logits
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 652 |
import math
def prime_sieve(n: int) -> list:
    """Return all primes strictly below `n` via a sieve over odd numbers.

    Restored: the obfuscation dropped the subscript targets of the sieve
    initialization and replaced the final loop bound with an undefined name;
    the caller below uses the name `prime_sieve`.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Only odd candidates need sieving; multiples of i start at 2*i.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
def solution(limit: int = 999966663333) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding `limit`.

    For consecutive primes p < q, sums the numbers in (p^2, q^2) divisible by
    exactly one of p, q. Restored: the obfuscation dropped all the local
    variable names the loop body reads (`primes`, `last_prime`, `current`, ...).
    """
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
    # Print the answer when the module is run as a script.
    print(solution())
| 652 | 1 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class A_ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenizer tests for ProphetNet (BERT-style WordPiece vocabulary).

    Restored from the obfuscated original: the base mixin was an undefined
    placeholder, the class attributes shared one name, every method was named
    `lowerCAmelCase` (so `setUp` never ran and no test was collected), and the
    boolean/keyword arguments of `BasicTokenizer`/`tokenizer` calls were
    undefined placeholders — reconstructed from the expected outputs.
    """

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Minimal WordPiece vocabulary written to a temp file for the tests.
        vocab_tokens = [
            '[UNK]',
            '[CLS]',
            '[SEP]',
            '[PAD]',
            '[MASK]',
            'want',
            '##want',
            '##ed',
            'wa',
            'un',
            'runn',
            '##ing',
            ',',
            'low',
            'lowest',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize('UNwant\u00E9d,running')
        self.assertListEqual(tokens, ['un', '##want', '##ed', ',', 'runn', '##ing'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz'), ['ah', '\u535A', '\u63A8', 'zz'])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['hello', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hällo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['h\u00E9llo'])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # strip_accents defaults to lowercasing behaviour.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['hallo', '!', 'how', 'are', 'you', '?'])
        self.assertListEqual(tokenizer.tokenize('H\u00E9llo'), ['hello'])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU?  '), ['HeLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HäLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(' \tHäLLo!how  \n Are yoU?  '), ['HaLLo', '!', 'how', 'Are', 'yoU', '?'])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=['[UNK]'])
        self.assertListEqual(
            tokenizer.tokenize(' \tHeLLo!how  \n Are yoU? [UNK]'), ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token='[UNK]')
        self.assertListEqual(tokenizer.tokenize(''), [])
        self.assertListEqual(tokenizer.tokenize('unwanted running'), ['un', '##want', '##ed', 'runn', '##ing'])
        self.assertListEqual(tokenizer.tokenize('unwantedX running'), ['[UNK]', 'runn', '##ing'])

    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased')
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors='pt')
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(' '))
        self.assertTrue(_is_whitespace('\t'))
        self.assertTrue(_is_whitespace('\r'))
        self.assertTrue(_is_whitespace('\n'))
        self.assertTrue(_is_whitespace('\u00A0'))
        self.assertFalse(_is_whitespace('A'))
        self.assertFalse(_is_whitespace('-'))

    def test_is_control(self):
        self.assertTrue(_is_control('\u0005'))
        self.assertFalse(_is_control('A'))
        self.assertFalse(_is_control(' '))
        self.assertFalse(_is_control('\t'))
        self.assertFalse(_is_control('\r'))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation('-'))
        self.assertTrue(_is_punctuation('$'))
        self.assertTrue(_is_punctuation('`'))
        self.assertTrue(_is_punctuation('.'))
        self.assertFalse(_is_punctuation('A'))
        self.assertFalse(_is_punctuation(' '))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained('microsoft/prophetnet-large-uncased')
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_2 = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        # [SEP] id is 102 for this checkpoint.
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 652 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output of the VE-SDE scheduler's predictor step.

    Restored: both fields were obfuscated to `_UpperCAmelCase` (the second
    clobbering the first), while the scheduler below constructs
    `SdeVeOutput(prev_sample=..., prev_sample_mean=...)` by name.
    """

    # Computed sample (x_{t-1}) for the previous timestep.
    prev_sample: torch.FloatTensor
    # Mean of `prev_sample` before the stochastic noise term was added.
    prev_sample_mean: torch.FloatTensor
class A_ ( SchedulerMixin , ConfigMixin ):
    """Variance-exploding (VE) SDE scheduler (Song et al., score-based SDEs).

    `step_pred` runs the reverse-SDE predictor; `step_correct` runs a Langevin
    corrector step. Restored from the obfuscated original, whose method
    signatures reused one placeholder parameter name (a SyntaxError) and whose
    base classes were undefined placeholders.
    """

    # Solver order, read by pipelines when iterating timesteps.
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """No-op: the VE-SDE formulation needs no input scaling."""
        return sample

    def set_timesteps(
        self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None
    ):
        """Create the continuous timestep schedule, linear from 1 to `sampling_eps`."""
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        """Create the geometric noise schedule matching `self.timesteps`."""
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        # sigma at the previous discrete index; defined as 0 at index 0.
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Predictor step: propagate the sample one step along the reverse SDE."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ):
        """Langevin corrector step nudging the sample toward higher likelihood."""
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        """Diffuse `original_samples` by the per-timestep sigma (VE forward process)."""
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 652 | 1 |
def hamming(n_element: int) -> list:
    """Return the first `n_element` Hamming numbers (of the form 2^i * 3^j * 5^k).

    Raises ValueError for non-positive input. Restored: the caller in the
    `__main__` block uses the name `hamming`.
    """
    n_element = int(n_element)
    if n_element < 1:
        my_error = ValueError('a should be a positive number')
        raise my_error

    hamming_list = [1]
    # i, j, k index the smallest elements whose *2, *3, *5 products are still pending.
    i, j, k = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2, hamming_list[j] * 3, hamming_list[k] * 5))
        index += 1
    return hamming_list
if __name__ == "__main__":
    # Restored: the input and the result were both assigned to `a`, while the
    # code below reads `n` and `hamming_numbers`.
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
    print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
    print("""-----------------------------------------------------""")
    print(F"""The list with nth numbers is: {hamming_numbers}""")
    print("""-----------------------------------------------------""")
| 652 |
import logging

import numpy as np
import pytest
from scipy.linalg import eigh

# Emit the bare "... computed" messages from the analysis functions below.
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array (n, 1)."""
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Within-class scatter: sum of per-class covariance of the centered data.

    `features` is (n_features, n_samples); `labels[s]` is the class of sample s.
    """
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Between-class scatter: class-size-weighted outer products of
    (class mean - overall mean)."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project (n_features, n_samples) data onto its top `dimensions`
    principal components; raises AssertionError on an empty dataset."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project labelled data onto `dimensions` discriminant directions.

    Requires dimensions < classes; raises AssertionError otherwise or on an
    empty dataset. Also fixed: `features.any` was missing its call parentheses,
    so the emptiness check was always truthy.
    """
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        # Generalized eigenproblem of between- vs within-class scatter.
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must refuse dimensions > classes with an AssertionError."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(
            features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA output differing from the (deliberately wrong) expectation must
    trip the AssertionError captured by pytest.raises."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 652 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """A binary-tree node. Restored: the class must be named `TreeNode` (the
    annotations and the isinstance check below use that name) and its fields
    `data`/`left`/`right` (read as `node.data`, `node.left`, `node.right`)."""

    # Node payload; the validator requires it to be convertible to float.
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
_UpperCAmelCase : TreeNode | None = None
def is_binary_search_tree(tree_root: TreeNode | None) -> bool:
    """Return True iff the tree rooted at `tree_root` is a strict BST.

    Raises ValueError if any node is not a TreeNode with float-convertible
    data. Restored: the inner recursive checker's three parameters all shared
    one placeholder name (a SyntaxError) and the isinstance arguments were
    undefined placeholders.
    """

    # Validation
    def is_valid_tree(node) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree_root):
        raise ValueError(
            'Each node should be type of TreeNode and data should be float.')

    def is_binary_search_tree_recursive_check(
        node, left_bound, right_bound) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(tree_root, -float('inf'), float('inf'))
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed as a script.
    import doctest

    doctest.testmod()
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Restored: the test class below calls `logger.info(...)`.
logger = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    """Runs doctests over the transformers source/docs trees.

    Restored: the helper must be named `analyze_directory` (the test methods
    call it by that name), and its parameters all shared one placeholder name
    (a SyntaxError). Parameter names recovered from the keyword arguments used
    at the call sites.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file of `directory` whose name passes the filters.

        identifier: keep only files containing this substring.
        ignore_files: file names to skip ('__init__.py' is always skipped).
        n_identifier: substring(s) that exclude a file.
        only_modules: doctest the transformers module attribute rather than
            the raw file.
        """
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        # Known failures that should not break the run.
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self):
        # Everything except the three families covered above.
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_documentation_doctests(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 652 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A_ ( SCREAMING_SNAKE_CASE ):
    """Agent tool that answers a natural-language question about an image,
    backed by the ViLT VQA checkpoint `dandelin/vilt-b32-finetuned-vqa`.
    """
    # Checkpoint, natural-language description, and tool name used by the agent.
    _UpperCAmelCase : str = '''dandelin/vilt-b32-finetuned-vqa'''
    _UpperCAmelCase : Union[str, Any] = (
        '''This is a tool that answers a question about an image. It takes an input named `image` which should be the '''
        '''image containing the information, as well as a `question` which should be the question in English. It '''
        '''returns a text that is the answer to the question.'''
    )
    _UpperCAmelCase : Dict = '''image_qa'''
    # Classes used by PipelineTool to build the processor / model.
    _UpperCAmelCase : Dict = AutoProcessor
    _UpperCAmelCase : Tuple = AutoModelForVisualQuestionAnswering
    # Declared input / output modalities of the tool.
    _UpperCAmelCase : List[str] = ['''image''', '''text''']
    _UpperCAmelCase : List[str] = ['''text''']
    def __init__( self : Any ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : str):
        # Vision backend (PIL) must be available before the tool is constructed.
        requires_backends(self ,['vision'])
        super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : "Image" ,SCREAMING_SNAKE_CASE__ : str):
        # Preprocess image + question into PyTorch model inputs.
        return self.pre_processor(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[str]):
        # Inference only — no gradients needed.
        with torch.no_grad():
            return self.model(**SCREAMING_SNAKE_CASE__).logits
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : str):
        # Map the argmax answer index to its label string.
        # NOTE(review): `idalabel` looks like a mangled `id2label`, and `idx`
        # should be the argmax bound on the previous line — confirm upstream.
        __lowerCamelCase : Tuple = outputs.argmax(-1).item()
        return self.model.config.idalabel[idx]
| 652 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
a ="""▁"""
a ={"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}
a ={
"""vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
},
"""monolingual_vocab_file""": {
"""vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
},
}
a ={"""vinai/bartpho-syllable""": 1024}
class A_ ( SCREAMING_SNAKE_CASE ):
    """BARTpho (syllable-level) tokenizer: a SentencePiece model combined with a
    reduced monolingual vocabulary (fairseq-style `dict.txt`).

    NOTE(review): local assignments in this file were mangled to the placeholder
    `__lowerCamelCase`; later statements still read the original names
    (`mask_token`, `cnt`, `out_string`, ...). Comments describe apparent intent.
    """
    # Configuration consumed by the PreTrainedTokenizer machinery.
    _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']
    def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
        """Load the SentencePiece model and the reduced monolingual vocab file,
        then build the fairseq-compatible token<->id maps."""
        # Mask token behave like a normal word, i.e. include the space before it
        __lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
        __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : int = vocab_file
        __lowerCamelCase : Tuple = monolingual_vocab_file
        __lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(SCREAMING_SNAKE_CASE__))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        __lowerCamelCase : Optional[int] = {}
        __lowerCamelCase : List[Any] = 0
        # Special tokens claim the first ids, skipping duplicates.
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
                __lowerCamelCase : Any = cnt
                cnt += 1
        # Remaining ids come from the first column of the fairseq dict file.
        with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f:
            for line in f.readlines():
                __lowerCamelCase : Any = line.strip().split()[0]
                __lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids)
        if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
            __lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids)
        # Inverse map: id -> token.
        __lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__( self : int):
        # The SentencePiece processor is not picklable: ship its serialized proto.
        __lowerCamelCase : Tuple = self.__dict__.copy()
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
        return state
    def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
        # Rebuild the SentencePiece processor from the serialized proto.
        __lowerCamelCase : List[str] = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs'):
            __lowerCamelCase : str = {}
        __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        """Build model inputs with special tokens: `<s> A </s>` for one sequence,
        `<s> A </s></s> B </s>` for a pair."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __lowerCamelCase : Tuple = [self.cls_token_id]
        __lowerCamelCase : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
        """Return a mask (1 = special token, 0 = sequence token) matching the
        layout produced by `build_inputs_with_special_tokens`."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        """Token-type ids: all zeros (BARTpho does not use token types)."""
        __lowerCamelCase : Dict = [self.sep_token_id]
        __lowerCamelCase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
    @property
    def lowerCAmelCase ( self : List[str]):
        # Vocab size is the size of the *reduced* vocabulary, not the SP model's.
        return len(self.fairseq_ids_to_tokens)
    def lowerCAmelCase ( self : Dict):
        # Full token->id map, including tokens added after loading.
        __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str):
        # Tokenize text into SentencePiece subword strings.
        return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # Token -> id via the reduced vocab; unknown tokens map to <unk>.
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        # Id -> token via the inverse reduced vocab.
        return self.fairseq_ids_to_tokens[index]
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str):
        # Join subword pieces back into a string.
        # NOTE(review): the `.replace(...)` target was mangled — upstream this
        # replaces the SentencePiece underline (▁) with a space; confirm.
        __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip()
        return out_string
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
        """Save both vocabulary files into `save_directory`, copying the originals
        when present on disk, otherwise re-serializing from memory."""
        if not os.path.isdir(SCREAMING_SNAKE_CASE__):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        __lowerCamelCase : Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        __lowerCamelCase : Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
        # SentencePiece model: copy if it exists, otherwise dump the proto.
        if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
        elif not os.path.isfile(self.vocab_file):
            with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
                __lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE__)
        # Monolingual vocab: copy if present, otherwise write non-special tokens.
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            SCREAMING_SNAKE_CASE__) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file ,SCREAMING_SNAKE_CASE__)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"{str(SCREAMING_SNAKE_CASE__)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 652 | 1 |
import numpy as np


def power_iteration(
    input_matrix,
    vector,
    error_tol=1e-12,
    max_iterations=1_0_0,
) -> tuple[float, np.ndarray]:
    """Approximate the dominant eigenpair of a matrix by power iteration.

    Fixes vs. the mangled original: the definition name is restored to
    `power_iteration` (the call site in this file uses that name) and the
    signature no longer repeats the same placeholder parameter name four
    times, which was a SyntaxError.

    Args:
        input_matrix: square (n, n) matrix; if complex it must be Hermitian
            so the Rayleigh quotient yields a real eigenvalue.
        vector: length-n starting vector (should not be orthogonal to the
            dominant eigenvector).
        error_tol: stop when the relative change of the eigenvalue estimate
            drops below this tolerance.
        max_iterations: hard cap on the number of iterations.

    Returns:
        (eigenvalue, eigenvector): largest-magnitude eigenvalue and the
        corresponding unit eigenvector.
    """
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))
        # Check convergence: relative change of the eigenvalue estimate.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        # Hermitian input guarantees a real eigenvalue; drop residual imag part.
        lambda_ = np.real(lambda_)
    return lambda_, vector
def test_power_iteration() -> None:
    """Self-test: check `power_iteration` against `numpy.linalg.eigh` on a real
    symmetric matrix and on a Hermitian complex matrix.

    Fixes vs. the mangled original: the definition name is restored to
    `test_power_iteration` (called by name under ``__main__`` below), the
    invalid annotated multi-target assignments are replaced with plain tuple
    unpacking, and the garbled ``np.complexaaa`` dtype is ``np.complex128``.
    """
    real_input_matrix = np.array([[4_1, 4, 2_0], [4, 2_6, 3_0], [2_0, 3_0, 5_0]])
    real_vector = np.array([4_1, 4, 2_0])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    # Add an antisymmetric imaginary part so the matrix stays Hermitian.
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([4_1, 4, 2_0]).astype(np.complex128)
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
| 652 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
    """Shared fixture builder for the TF DeiT tests: holds hyper-parameters,
    builds configs/inputs, and runs per-head sanity checks.

    NOTE(review): constructor parameter names were mangled; the assignment
    order below shows the intended positional meaning of each argument.
    """
    def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,):
        __lowerCamelCase : Optional[int] = parent
        __lowerCamelCase : Optional[Any] = batch_size
        __lowerCamelCase : Dict = image_size
        __lowerCamelCase : Optional[Any] = patch_size
        __lowerCamelCase : Optional[Any] = num_channels
        __lowerCamelCase : str = is_training
        __lowerCamelCase : List[Any] = use_labels
        __lowerCamelCase : Any = hidden_size
        __lowerCamelCase : Optional[int] = num_hidden_layers
        __lowerCamelCase : Any = num_attention_heads
        __lowerCamelCase : Tuple = intermediate_size
        __lowerCamelCase : Dict = hidden_act
        __lowerCamelCase : Optional[Any] = hidden_dropout_prob
        __lowerCamelCase : List[Any] = attention_probs_dropout_prob
        __lowerCamelCase : Dict = type_sequence_label_size
        __lowerCamelCase : Optional[Any] = initializer_range
        __lowerCamelCase : List[str] = scope
        __lowerCamelCase : Union[str, Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        __lowerCamelCase : str = (image_size // patch_size) ** 2
        __lowerCamelCase : str = num_patches + 2
    def lowerCAmelCase ( self : Optional[Any]):
        """Build a (config, pixel_values, labels) triple; labels only if enabled."""
        __lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __lowerCamelCase : List[Any] = None
        if self.use_labels:
            __lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
        __lowerCamelCase : List[str] = self.get_config()
        return config, pixel_values, labels
    def lowerCAmelCase ( self : List[Any]):
        """Build a DeiTConfig from the stored hyper-parameters."""
        return DeiTConfig(
            image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict):
        """Base model: check last_hidden_state shape (batch, seq_len, hidden)."""
        __lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any):
        """Masked-image-modeling head: reconstruction shape, plus greyscale path."""
        __lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(
            result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        __lowerCamelCase : int = 1
        __lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size))
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
        """Classification head: logits shape, plus greyscale path."""
        __lowerCamelCase : Dict = self.type_sequence_label_size
        __lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        __lowerCamelCase : List[Any] = 1
        __lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        __lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
    def lowerCAmelCase ( self : Dict):
        """Unpack prepared fixtures into (config, inputs_dict) for common tests."""
        __lowerCamelCase : Optional[Any] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = config_and_inputs
        __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Common + pipeline test-suite for the TF DeiT models."""
    # All TF DeiT head classes exercised by the shared model tests.
    _UpperCAmelCase : Union[str, Any] = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    # Mapping used by the pipeline tester mixin.
    _UpperCAmelCase : List[Any] = (
        {
            '''feature-extraction''': TFDeiTModel,
            '''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # Feature flags consumed by the common tester (no head masking, no
    # attention/hidden-state pruning checks, etc.).
    _UpperCAmelCase : Optional[int] = False
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Optional[int] = False
    _UpperCAmelCase : Optional[int] = False
    def lowerCAmelCase ( self : Any):
        # Fresh model tester + config tester per test (DeiT has no text modality).
        __lowerCamelCase : str = TFDeiTModelTester(self)
        __lowerCamelCase : Optional[int] = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__ ,hidden_size=3_7)
    def lowerCAmelCase ( self : str):
        """Run the generic config serialization checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def lowerCAmelCase ( self : List[Any]):
        pass
    def lowerCAmelCase ( self : Dict):
        """Input embeddings are a Keras layer; output embeddings are Dense or None."""
        __lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE__)
            self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer))
            __lowerCamelCase : int = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE__ ,tf.keras.layers.Dense))
    def lowerCAmelCase ( self : List[Any]):
        """`call` must take `pixel_values` as its first argument for every head."""
        __lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : str = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase : Any = [*signature.parameters.keys()]
            __lowerCamelCase : Union[str, Any] = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : str):
        """Base-model forward-pass shape check."""
        __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : str):
        """Masked-image-modeling head check."""
        __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict):
        """Image-classification head check."""
        __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : str=False):
        """Drop `labels` for heads whose `call` does not accept them (e.g. the
        with-teacher classifier)."""
        __lowerCamelCase : Optional[Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict
    @slow
    def lowerCAmelCase ( self : Optional[int]):
        """Smoke-test loading the first pretrained checkpoint from the hub."""
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : Union[str, Any] = TFDeiTModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def prepare_img() -> "Image.Image":
    """Load the standard COCO cats fixture image used by the integration tests.

    Fixes vs. the mangled original: the definition name is restored to
    `prepare_img` (the integration test below calls it by that name), and the
    opened image is actually bound before being returned — the original
    returned an unbound `image`, which raised NameError.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class A_ ( unittest.TestCase ):
    """Integration test: run the distilled DeiT classifier on a real image and
    compare the first logits against recorded reference values."""
    @cached_property
    def lowerCAmelCase ( self : List[Any]):
        # Image processor for the checkpoint; None when vision deps are missing.
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )
    @slow
    def lowerCAmelCase ( self : List[Any]):
        __lowerCamelCase : Optional[int] = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')
        __lowerCamelCase : int = self.default_image_processor
        __lowerCamelCase : Tuple = prepare_img()
        __lowerCamelCase : Tuple = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='tf')
        # forward pass
        __lowerCamelCase : int = model(**SCREAMING_SNAKE_CASE__)
        # verify the logits
        __lowerCamelCase : Optional[int] = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
        # Reference slice recorded from a known-good run of this checkpoint.
        __lowerCamelCase : List[Any] = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 652 | 1 |
import json
import os
import unittest
from typing import Tuple
from transformers import WavaVecaPhonemeCTCTokenizer
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.models.wavaveca_phoneme.tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizerOutput
from transformers.testing_utils import require_phonemizer
from ...test_tokenization_common import TokenizerTesterMixin
@require_phonemizer
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
_UpperCAmelCase : Tuple = WavaVecaPhonemeCTCTokenizer
_UpperCAmelCase : int = False
    def lowerCAmelCase ( self : Optional[Any]):
        """Write a phoneme vocabulary (IPA symbols + tone-marked variants) and a
        special-tokens map into the temp dir used by `get_tokenizer`."""
        super().setUp()
        # Space-separated phoneme inventory; order defines the token ids.
        __lowerCamelCase : Optional[Any] = (
            '<s> <pad> </s> <unk> n s t ə l a i k d m ɛ ɾ e ɪ p o ɐ z ð f j v b ɹ ʁ ʊ iː r w ʌ u ɡ æ aɪ ʃ h ɔ ɑː '
            'ŋ ɚ eɪ β uː y ɑ̃ oʊ ᵻ eː θ aʊ ts oː ɔ̃ ɣ ɜ ɑ dʒ əl x ɜː ç ʒ tʃ ɔː ɑːɹ ɛ̃ ʎ ɔːɹ ʋ aː ɕ œ ø oːɹ ɲ yː '
            'ʔ iə i5 s. tɕ ?? nʲ ɛː œ̃ ɭ ɔø ʑ tʲ ɨ ɛɹ ts. rʲ ɪɹ ɭʲ i.5 ɔɪ q sʲ u5 ʊɹ iɜ a5 iɛ5 øː ʕ ja əɜ th ɑ5 '
            'oɪ dʲ ə5 tɕh ts.h mʲ ɯ dʑ vʲ e̞ tʃʲ ei5 o5 onɡ5 ɑu5 iɑ5 ai5 aɪɚ kh ə1 ʐ i2 ʉ ħ t[ aɪə ʲ ju ə2 u2 oɜ '
            'pː iɛɜ ou5 y5 uɜ tː uo5 d[ uoɜ tsh ɑɜ ɵ i̪5 uei5 ɟ aɜ ɑɨ i.ɜ eʊ o2 ɐ̃ ä pʲ kʲ n̩ ɒ ph ɑu2 uɨ əɪ ɫ ɬ '
            'yɜ bʲ ɑ2 s̪ aiɜ χ ɐ̃ʊ̃ 1 ə4 yæɜ a2 ɨː t̪ iouɜ ũ onɡɜ aɨ iɛ2 ɔɨ ɑuɜ o̞ ei2 iou2 c kː y2 ɖ oe dˤ yɛɜ '
            'əʊ S ɡʲ onɡ2 u" eiɜ ʈ ɯᵝ iou5 dZ r̝̊ i.2 tS s^ ʝ yə5 iɑɜ uə5 pf ɨu iɑ2 ou2 ər2 fʲ ai2 r̝ uəɜ ɳ əɨ '
            'ua5 uɪ ɽ bː yu5 uo2 yɛ5 l̩ ɻ ərɜ ʂ i̪2 ouɜ uaɜ a. a.ː yæ5 dː r̩ ee ɪu ər5 i̪ ɜ æi u: i.ː t^ o1 ɪ^ '
            'ai ueiɜ æː ɛɪ eə i. ɴ ie ua2 ɑ1 o4 tʃː o: ɑ: u1 N i̪1 au yæ2 u. qː yəɜ y: kʰ tʃʰ iʊ sx õ uo tʰ '
            'uai5 bʰ u.ː uə2 ʊə d^ s̪ː yiɜ dʰ r. oe: i1 ɟː yu2 nʲʲ i̪4 uei2 tsʲ ɸ ĩ ɑ4 t̪ː eɑ u4 e: tsː ʈʰ ɡʰ '
            'ɯɯ dʒʲ ʂʲ X ɵː uaiɜ tɕʲ ã t^ː ẽː yɛ2 cː i.1 ɛʊ dˤdˤ dʒː i4 ɡː yi ɕʲ ɟʰ pʰ dʑʲ yuɜ ua1 ua4 æiː ɐɐ '
            'ui iou1 ʊː a1 iou4 cʰ iɛ1 yə2 ɖʰ ẽ ʒʲ ää ər4 iːː ɪː iɑ1 ər1 œː øi ɪuː cʰcʰ əː1 iː1 ũ kʰː o̞o̞ xʲ '
            'ou1 iɛ4 e̞e̞ y1 dzː dʲʲ dʰː ɯᵝɯᵝ lː uo1 i.4 i: yɛ5ʲ a4'
        ).split(' ')
        # token -> id map, written as the tokenizer's JSON vocab file.
        __lowerCamelCase : Optional[Any] = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__))))
        __lowerCamelCase : List[str] = {'pad_token': '<pad>', 'unk_token': '<unk>', 'bos_token': '<s>', 'eos_token': '</s>'}
        __lowerCamelCase : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file ,'w' ,encoding='utf-8') as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__) + '\n')
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Any=False ,SCREAMING_SNAKE_CASE__ : List[Any]=2_0 ,SCREAMING_SNAKE_CASE__ : Optional[int]=5):
        """Build a (text, ids) pair whose round-trip through the tokenizer is
        stable: only single-token-decodable ids, clipped/padded to the given
        max/min length.

        NOTE(review): parameter names were mangled; the body reads the
        originals (`tokenizer`, `with_prefix_space`, `max_length`,
        `min_length`) — confirm against upstream.
        """
        # Candidate (id, decoded) pairs, keeping only ids that re-encode to themselves.
        __lowerCamelCase : Optional[int] = [(i, tokenizer.decode([i] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)) for i in range(len(SCREAMING_SNAKE_CASE__))]
        __lowerCamelCase : List[str] = list(filter(lambda SCREAMING_SNAKE_CASE__: [t[0]] == tokenizer.encode(t[1] ,do_phonemize=SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__))
        if max_length is not None and len(SCREAMING_SNAKE_CASE__) > max_length:
            __lowerCamelCase : Tuple = toks[:max_length]
        if min_length is not None and len(SCREAMING_SNAKE_CASE__) < min_length and len(SCREAMING_SNAKE_CASE__) > 0:
            # Duplicate until long enough.
            while len(SCREAMING_SNAKE_CASE__) < min_length:
                __lowerCamelCase : List[Any] = toks + toks
        # toks_str = [t[1] for t in toks]
        __lowerCamelCase : str = [t[0] for t in toks]
        # Ensure consistency
        __lowerCamelCase : Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)
        if " " not in output_txt and len(SCREAMING_SNAKE_CASE__) > 1:
            # Force a space between the first token and the rest.
            __lowerCamelCase : Union[str, Any] = (
                tokenizer.decode([toks_ids[0]] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)
                + ' '
                + tokenizer.decode(toks_ids[1:] ,clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__)
            )
        if with_prefix_space:
            __lowerCamelCase : str = ' ' + output_txt
        __lowerCamelCase : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        return output_txt, output_ids
    def lowerCAmelCase ( self : Optional[int] ,**SCREAMING_SNAKE_CASE__ : int):
        """Instantiate the tokenizer from the temp dir written by setUp,
        merging the fixture's special-tokens map into the kwargs.

        NOTE(review): the kwargs parameter name was mangled; the body reads
        `kwargs` — confirm against upstream.
        """
        kwargs.update(self.special_tokens_map)
        return WavaVecaPhonemeCTCTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Any):
        """Added tokens are appended after the base vocab and unknown multi-char
        input falls back to <unk>."""
        __lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        # check adding a single token
        tokenizer.add_tokens('xxx')
        __lowerCamelCase : List[Any] = tokenizer('m xxx ɪ' ,do_phonemize=SCREAMING_SNAKE_CASE__).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,[1_3, 3_9_2, 1_7]) # xxx should be last token
        tokenizer.add_tokens(['aaa', 'bbb', 'ccc'])
        __lowerCamelCase : List[Any] = tokenizer('m aaa ɪ ccc' ,do_phonemize=SCREAMING_SNAKE_CASE__).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,[1_3, 3_9_3, 1_7, 3_9_5]) # aaa and ccc should be after xxx and 2 after aaa
        __lowerCamelCase : Union[str, Any] = tokenizer('maɪ c' ,do_phonemize=SCREAMING_SNAKE_CASE__).input_ids
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,[3, 2_0_0]) # mai should be <unk> (=3)
    def lowerCAmelCase ( self : Union[str, Any]):
        """`phonemize` converts English text into space-separated IPA phonemes."""
        __lowerCamelCase : List[str] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        __lowerCamelCase : Optional[int] = 'Hello how are you'
        __lowerCamelCase : Dict = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,'h ə l oʊ h aʊ ɑːɹ j uː')
    def lowerCAmelCase ( self : Tuple):
        """Encoding raw text equals encoding its pre-phonemized form."""
        __lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        __lowerCamelCase : Dict = 'Hello how are you'
        __lowerCamelCase : Optional[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__).input_ids ,tokenizer(SCREAMING_SNAKE_CASE__ ,do_phonemize=SCREAMING_SNAKE_CASE__).input_ids)
    def lowerCAmelCase ( self : Union[str, Any]):
        """Encode → decode round-trips back to the phonemized text."""
        __lowerCamelCase : Optional[int] = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        __lowerCamelCase : str = 'Hello how are you'
        __lowerCamelCase : Dict = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
        __lowerCamelCase : Optional[int] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__).input_ids)
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Any):
        """CTC-style decode: pad tokens are dropped and repeats collapsed;
        batch_decode agrees with per-sample decode."""
        __lowerCamelCase : str = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
        __lowerCamelCase : Optional[Any] = [
            [1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8],
            [2_4, 2_2, 5, 2_4, 2_2, 5, 7_7],
        ]
        __lowerCamelCase : str = tokenizer.decode(sample_ids[0])
        __lowerCamelCase : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__)
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,batch_tokens[0])
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'])
    def lowerCAmelCase ( self : Tuple):
        """With a word delimiter configured, phonemize inserts `|` between words."""
        __lowerCamelCase : Union[str, Any] = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token='|')
        tokenizer.add_tokens('|')
        __lowerCamelCase : Optional[Any] = 'Hello how are you'
        __lowerCamelCase : Optional[int] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
        self.assertEqual(SCREAMING_SNAKE_CASE__ ,'h ə l oʊ | h aʊ | ɑːɹ | j uː |')
    def lowerCAmelCase ( self : Dict):
        """With the word delimiter, raw-text and pre-phonemized encodings match."""
        __lowerCamelCase : Any = self.tokenizer_class.from_pretrained(
            'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token='|')
        tokenizer.add_tokens('|')
        __lowerCamelCase : List[str] = 'Hello how are you'
        __lowerCamelCase : List[Any] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
        self.assertEqual(tokenizer(SCREAMING_SNAKE_CASE__).input_ids ,tokenizer(SCREAMING_SNAKE_CASE__ ,do_phonemize=SCREAMING_SNAKE_CASE__).input_ids)
def lowerCAmelCase ( self : int):
__lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token='|')
tokenizer.add_tokens('|')
# fmt: off
__lowerCamelCase : str = [
[1_1, 5, 1_5, tokenizer.pad_token_id, tokenizer.word_delimiter_token_id, 1_5, 8, tokenizer.word_delimiter_token_id, 9_8],
[tokenizer.word_delimiter_token_id, 2_4, 2_2, tokenizer.word_delimiter_token_id, 5, 2_4, 2_2, 5, 7_7],
]
# fmt: on
# decode with word_del_token filter
__lowerCamelCase : Union[str, Any] = tokenizer.decode(sample_ids[0])
__lowerCamelCase : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,batch_tokens[0])
self.assertEqual(SCREAMING_SNAKE_CASE__ ,['k s ɾ ɾ l ɭʲ', 'j ð s j ð s oːɹ'])
# decode with no word_del_token filter
__lowerCamelCase : Dict = tokenizer.decode(sample_ids[0] ,filter_word_delimiter_token=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,filter_word_delimiter_token=SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,batch_tokens[0])
self.assertEqual(SCREAMING_SNAKE_CASE__ ,['k s ɾ | ɾ l | ɭʲ', '| j ð | s j ð s oːɹ'])
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : Optional[Any] = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token='|')
tokenizer.add_tokens('|')
__lowerCamelCase : Optional[int] = 'Hello how are you'
__lowerCamelCase : List[str] = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
__lowerCamelCase : Any = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__).input_ids ,filter_word_delimiter_token=SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Tuple):
__lowerCamelCase : str = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token='|')
tokenizer.add_tokens('|')
__lowerCamelCase : str = 'Hello how are you'
__lowerCamelCase : Tuple = tokenizer.phonemize(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us')
__lowerCamelCase : List[Any] = tokenizer.decode(tokenizer(SCREAMING_SNAKE_CASE__).input_ids ,filter_word_delimiter_token=SCREAMING_SNAKE_CASE__)
self.assertEqual(' '.join([p.strip() for p in phonemes.split(' |')]).strip() ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : Union[str, Any]):
__lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(
'facebook/wav2vec2-lv-60-espeak-cv-ft' ,word_delimiter_token=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = 'Hello how are you'
__lowerCamelCase : int = tokenizer(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='en-us').input_ids
__lowerCamelCase : Optional[int] = tokenizer(SCREAMING_SNAKE_CASE__ ,phonemizer_lang='fr-fr').input_ids
self.assertNotEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = tokenizer.decode(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = tokenizer.decode(SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'h ə l oʊ h aʊ ɑːɹ j uː')
self.assertEqual(SCREAMING_SNAKE_CASE__ ,'ɛ l o h aʊ a ʁ j u')
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
__lowerCamelCase : Any = 'Hello how Are you'
__lowerCamelCase : Dict = 'hello how are you'
__lowerCamelCase : List[Any] = tokenizer(SCREAMING_SNAKE_CASE__).input_ids
__lowerCamelCase : int = tokenizer(SCREAMING_SNAKE_CASE__).input_ids
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : Dict = self.tokenizer_class.from_pretrained('facebook/wav2vec2-lv-60-espeak-cv-ft')
tokenizer.add_tokens(['!', '?'])
tokenizer.add_special_tokens({'cls_token': '$$$'})
# fmt: off
__lowerCamelCase : Optional[Any] = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 8, 9_8, 3_9_2, 3_9_2, 3_9_3, 3_9_2, 3_9_2, 3_9_3, 3_9_4, 3_9_4],
[2_4, 2_2, 5, 2_4, 2_2, 5, 7_7, tokenizer.pad_token_id, 3_9_4, 3_9_4],
]
# fmt: on
__lowerCamelCase : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,['k s ɾ ɾ l ɭʲ!?!? $$$', 'j ð s j ð s oːɹ $$$'])
@staticmethod
def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
__lowerCamelCase : int = [d[key] for d in offsets]
return retrieved_list
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Optional[int] = self.get_tokenizer(word_delimiter_token='|')
tokenizer.add_tokens('|')
# fmt: off
# ksssɾɾ|ɾɾ<pad>ɾɾ|<pad>ɾlll|ɭʲ -> k s ɾ ɾ | ɾ l | ɭʲ"
__lowerCamelCase : Union[str, Any] = [1_1, 5, 5, 5, 1_5, 1_5, tokenizer.pad_token_id, 1_5, 1_5, tokenizer.word_delimiter_token_id, tokenizer.pad_token_id, 1_5, 8, 8, 8, tokenizer.word_delimiter_token_id, 9_8]
# fmt: on
__lowerCamelCase : Optional[int] = tokenizer.decode(SCREAMING_SNAKE_CASE__ ,output_char_offsets=SCREAMING_SNAKE_CASE__ ,filter_word_delimiter_token=SCREAMING_SNAKE_CASE__)
# check Wav2Vec2CTCTokenizerOutput keys for char
self.assertEqual(len(outputs.keys()) ,2)
self.assertTrue('text' in outputs)
self.assertTrue('char_offsets' in outputs)
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
# check that order of chars is correct and identical for both outputs
self.assertEqual(' '.join(self.get_from_offsets(outputs['char_offsets'] ,'char')) ,outputs.text)
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] ,'char') ,['k', 's', 'ɾ', 'ɾ', '|', 'ɾ', 'l', '|', 'ɭʲ'])
# check that offsets are actually correct for char
# 0-1 is 11, 1-4 is 5, 4-6 is first 15, 6-7 is <pad> (thus not shown), 7-9 is second 15, 9-10 is word_delimiter_token,
# 10-11 is <pad> (thus not shown), 11-12 is third 15, 12-15 is 8, 15-16 is word_delimiter_token, 16-17 is 98
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] ,'start_offset') ,[0, 1, 4, 7, 9, 1_1, 1_2, 1_5, 1_6])
self.assertListEqual(
self.get_from_offsets(outputs['char_offsets'] ,'end_offset') ,[1, 4, 6, 9, 1_0, 1_2, 1_5, 1_6, 1_7])
def lowerCAmelCase ( self : Optional[int]):
__lowerCamelCase : Any = self.get_tokenizer(word_delimiter_token='|')
def check_list_tuples_equal(SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : int):
self.assertTrue(isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
self.assertTrue(isinstance(outputs_list[0] ,SCREAMING_SNAKE_CASE__))
# transform list to ModelOutput
__lowerCamelCase : Union[str, Any] = WavaVecaPhonemeCTCTokenizerOutput(
{k: [d[k] for d in outputs_list] for k in outputs_list[0]})
self.assertListEqual(outputs_batch['text'] ,outputs_batch_a['text'])
def recursive_check(SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[str]):
if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
[recursive_check(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) for la, la in zip(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)]
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
if "char_offsets" in outputs_batch:
recursive_check(outputs_batch['char_offsets'] ,outputs_batch_a['char_offsets'])
# fmt: off
__lowerCamelCase : Tuple = [
[1_1, 5, 1_5, tokenizer.pad_token_id, 1_5, 4, 8, 9_8, 3_2, 3_2, 3_2, 3_2, 4, 3_3, tokenizer.word_delimiter_token_id, 3_2, 3_2, 3_3, 3_4, 3_4],
[2_4, 2_2, 5, tokenizer.word_delimiter_token_id, tokenizer.word_delimiter_token_id, 2_4, 2_2, 2_2, 2_2, 4, 5, 7_7, tokenizer.pad_token_id, 2_2, 2_2, 4, 3_4, 3_4, 3_4, 3_4],
]
# fmt: on
# We assume that `decode` works as expected. All we will check now is
# the output type is correct and the output is identical to `decode`
# char
__lowerCamelCase : Any = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ ,output_char_offsets=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = [tokenizer.decode(SCREAMING_SNAKE_CASE__ ,output_char_offsets=SCREAMING_SNAKE_CASE__) for ids in sample_ids]
check_list_tuples_equal(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    # Overrides of inherited common-tokenizer tests that do not apply to the
    # phoneme tokenizer; each one is skipped with its reason.
    @unittest.skip('Wav2Vec2PhonemeTokenizer always lower cases letters to correctly map to phonemes')
    def lowerCAmelCase ( self : Dict):
        pass
    @unittest.skip('Wav2Vec2PhonemeTokenizer always puts spaces between phonemes')
    def lowerCAmelCase ( self : str):
        pass
    @unittest.skip('encodes to text to ids, but decodes ids to phonemes -> not possible to have internal consistency')
    def lowerCAmelCase ( self : Tuple):
        pass
    @unittest.skip('Wav2Vec2PhonemeModel has no max model length => no testing')
    def lowerCAmelCase ( self : List[str]):
        pass
def lowerCAmelCase ( self : List[Any]):
__lowerCamelCase : Union[str, Any] = self.get_tokenizers(do_lower_case=SCREAMING_SNAKE_CASE__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__lowerCamelCase : Dict = tokenizer.vocab_size
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__)
self.assertNotEqual(SCREAMING_SNAKE_CASE__ ,0)
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
__lowerCamelCase : List[Any] = ['aaaaa bbbbbb', 'cccccccccdddddddd']
__lowerCamelCase : List[Any] = tokenizer.add_tokens(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = tokenizer.vocab_size
__lowerCamelCase : Union[str, Any] = len(SCREAMING_SNAKE_CASE__)
self.assertNotEqual(SCREAMING_SNAKE_CASE__ ,0)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__))
self.assertEqual(SCREAMING_SNAKE_CASE__ ,all_size + len(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Union[str, Any] = tokenizer.encode('aaaaa bbbbbb low cccccccccdddddddd l' ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__) ,4)
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1)
__lowerCamelCase : List[str] = {'eos_token': '>>>>|||<||<<|<<', 'pad_token': '<<<<<|||>|>>>>|>'}
__lowerCamelCase : Dict = tokenizer.add_special_tokens(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = tokenizer.vocab_size
__lowerCamelCase : int = len(SCREAMING_SNAKE_CASE__)
self.assertNotEqual(SCREAMING_SNAKE_CASE__ ,0)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
self.assertEqual(SCREAMING_SNAKE_CASE__ ,len(SCREAMING_SNAKE_CASE__))
self.assertEqual(SCREAMING_SNAKE_CASE__ ,all_size_a + len(SCREAMING_SNAKE_CASE__))
__lowerCamelCase : Tuple = tokenizer.encode(
'>>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l' ,add_special_tokens=SCREAMING_SNAKE_CASE__)
self.assertGreaterEqual(len(SCREAMING_SNAKE_CASE__) ,6)
self.assertGreater(tokens[0] ,tokenizer.vocab_size - 1)
self.assertGreater(tokens[0] ,tokens[1])
self.assertGreater(tokens[-3] ,tokenizer.vocab_size - 1)
self.assertGreater(tokens[-3] ,tokens[-4])
self.assertEqual(tokens[0] ,tokenizer.eos_token_id)
self.assertEqual(tokens[-3] ,tokenizer.pad_token_id)
    # Encoding-side common tests are skipped: this tokenizer is decode-only
    # (except for labels), so the inherited tests are not meaningful here.
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def lowerCAmelCase ( self : str):
        pass
    @unittest.skip('The tokenizer shouldn\'t be used to encode input IDs (except for labels), only to decode.')
    def lowerCAmelCase ( self : int):
        pass
def lowerCAmelCase ( self : str):
# The default common tokenizer tests assumes that the output of `convert_tokens_to_string` is a string which
# is not the case for Wav2Vec2PhonemeCTCTokenizer.
__lowerCamelCase : Tuple = self.get_tokenizers(fast=SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__)
for tokenizer in tokenizers:
with self.subTest(F"{tokenizer.__class__.__name__}"):
__lowerCamelCase : List[Any] = ['ð', 'ɪ', 's', 'ɪ', 'z', 'ɐ', 't', 'ɛ', 'k', 's', 't']
__lowerCamelCase : Tuple = tokenizer.convert_tokens_to_string(SCREAMING_SNAKE_CASE__)
self.assertIsInstance(output['text'] ,SCREAMING_SNAKE_CASE__)
| 652 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output container for the semantic Stable Diffusion pipeline.

    NOTE(review): both fields were obfuscated to the same identifier, so the
    second annotation shadows the first and only one attribute survives —
    confirm against the original `SemanticStableDiffusionPipelineOutput`
    (images + per-image NSFW flags).
    """
    # generated images, either a list of PIL images or a numpy array
    _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
    # optional per-image flags from the safety checker
    _UpperCAmelCase : Optional[List[bool]]
# Only expose the pipeline when both torch and transformers are installed.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
from math import factorial
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 2_0 ) -> int:
__lowerCamelCase : List[Any] = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
__lowerCamelCase : List[Any] = n // 2
return int(factorial(lowerCamelCase__ ) / (factorial(lowerCamelCase__ ) * factorial(n - k )) )
if __name__ == "__main__":
    # CLI entry point: no argument -> n=20, otherwise parse the first argument.
    # NOTE(review): `solution` is expected to be the lattice-path function
    # defined above (obfuscated to SCREAMING_SNAKE_CASE__) — confirm the name.
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            # fix: the parsed value was bound to `a` but the original called
            # `solution(n)` with an undefined `n`; pass the parsed value.
            a = int(sys.argv[1])
            print(solution(a))
        except ValueError:
            print("""Invalid entry - please enter a number.""")
| 652 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel  # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline  # noqa: F401

# Emit a deprecation warning at import time, pointing users to the new import
# location; `standard_warn=False` keeps the custom message verbatim.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 652 | 1 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
    """Build and parse the CLI for the INT8 Stable Diffusion demo.

    Fixes the original, which bound the parser to a clobbered local (so every
    `parser.add_argument` raised NameError) and passed undefined placeholders
    for `type`/`default`/`required`.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-m' , '--pretrained_model_name_or_path' , type=str , default=None , required=True , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
    parser.add_argument(
        '-c' , '--caption' , type=str , default='robotic cat with wings' , help='Text used to generate images.' , )
    parser.add_argument(
        '-n' , '--images_num' , type=int , default=4 , help='How much images to generate.' , )
    parser.add_argument(
        '-s' , '--seed' , type=int , default=4_2 , help='Seed for random process.' , )
    parser.add_argument(
        '-ci' , '--cuda_id' , type=int , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def SCREAMING_SNAKE_CASE__ ( imgs , rows , cols ) -> str:
    """Paste `rows*cols` equally-sized images into one grid image.

    Fixes the original, which declared all three parameters with the same name
    (SyntaxError) and clobbered the `w, h` unpack into a single local.
    """
    if not len(imgs) == rows * cols:
        raise ValueError('The specified number of rows and columns are not correct.')
    w, h = imgs[0].size  # all images are assumed to share the first image's size
    grid = Image.new('RGB' , size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        # fill the grid row-major: column = i % cols, row = i // cols
        grid.paste(img , box=(i % cols * w, i // cols * h))
    return grid
def SCREAMING_SNAKE_CASE__ ( pipeline , prompt="robotic cat with wings" , guidance_scale=7.5 , num_inference_steps=5_0 , num_images_per_prompt=1 , seed=4_2 , ) -> List[Any]:
    """Run the pipeline with a fixed seed and arrange the outputs in a grid.

    Fixes the original duplicate parameter names (SyntaxError); keyword names
    match the call site `generate_images(pipeline, prompt=..., num_images_per_prompt=..., seed=...)`.
    """
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt , guidance_scale=guidance_scale , num_inference_steps=num_inference_steps , generator=generator , num_images_per_prompt=num_images_per_prompt , ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images , rows=_rows , cols=num_images_per_prompt // _rows)
    return grid, images
# NOTE(review): the original script bound every result to the same obfuscated
# name `a`, so later references (`unet`, `pipeline`, `images`, ...) raised
# NameError. Restored with distinct names; `parse_args`/`generate_images` are
# the two helpers defined above (obfuscated names) — confirm.
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="""tokenizer""")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""text_encoder""")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="""vae""")
unet = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="""unet""")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker: always return the images unchanged.
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, """best_model.pt""")):
    # Prefer the INT8-quantized UNet produced by neural_compressor when present.
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, """unet""", unet)
else:
    unet = unet.to(torch.device("""cuda""", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
# Save the composite grid next to the model, then each image in its own dir.
grid.save(os.path.join(args.pretrained_model_name_or_path, """{}.png""".format("""_""".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, """_""".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, """{}.png""".format(idx + 1)))
| 652 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# NOTE(review): all four module constants below are bound to the same
# obfuscated name `a`, so each assignment shadows the previous one; the code
# further down reads `logger`, `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP`
# and `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — confirm the original names.
a =logging.get_logger(__name__)
# expected vocab file name inside a saved tokenizer directory
a ={"""vocab_file""": """vocab.txt"""}
# download locations of pretrained vocabularies
a ={
    """vocab_file""": {
        """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
    },
}
# maximum input sizes per pretrained checkpoint
a ={
    """openbmb/cpm-ant-10b""": 1024,
}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
__lowerCamelCase : int = collections.OrderedDict()
with open(lowerCamelCase__ , 'r' , encoding='utf-8' ) as reader:
__lowerCamelCase : Optional[int] = reader.readlines()
for index, token in enumerate(lowerCamelCase__ ):
__lowerCamelCase : Optional[Any] = token.rstrip('\n' )
__lowerCamelCase : Union[str, Any] = index
return vocab
class A_ ( SCREAMING_SNAKE_CASE ):
    """Greedy longest-match-first wordpiece tokenizer.

    Fixes the original block, which declared `__init__` with one parameter
    name three times (SyntaxError) and clobbered every local in `tokenize`
    (NameError). Keyword names match the call site
    `WordpieceTokenizer(vocab=..., unk_token=...)`; the method is named
    `tokenize` because callers invoke `self.wordpiece_tokenizer.tokenize(...)`.
    """
    def __init__( self : int ,vocab ,unk_token="<unk>" ,max_input_chars_per_word=2_0_0):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self : Optional[Any] ,token):
        """Split `token` into the longest vocab entries, left to right."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            # overly long inputs degrade to a single unknown token
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # shrink the window from the right until a vocab entry matches
            while start < end:
                substr = ''.join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # no vocab entry starts here: emit <unk> and advance one char
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class A_ ( SCREAMING_SNAKE_CASE ):
    """CPM-Ant tokenizer: jieba coarse segmentation followed by greedy
    longest-match wordpiece lookup.

    NOTE(review): the original block did not parse (several defs reused one
    parameter name — SyntaxError) and every class attribute/method shared a
    single obfuscated name, so only the last of each survived. Restored to the
    conventional `PreTrainedTokenizer` hook names so the base class can call
    them — confirm against the upstream `CpmAntTokenizer`.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    add_prefix_space = False

    def __init__( self : Tuple ,vocab_file ,bod_token="<d>" ,eod_token="</d>" ,bos_token="<s>" ,eos_token="</s>" ,pad_token="<pad>" ,unk_token="<unk>" ,line_token="</n>" ,space_token="</_>" ,padding_side="left" ,**kwargs ,):
        requires_backends(self ,['jieba'])
        super().__init__(
            bod_token=bod_token ,eod_token=eod_token ,bos_token=bos_token ,eos_token=eos_token ,pad_token=pad_token ,unk_token=unk_token ,line_token=line_token ,space_token=space_token ,padding_side=padding_side ,**kwargs ,)
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # map the literal space/newline characters onto the special-token ids
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder ,unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder ,**self.added_tokens_encoder)

    def _tokenize(self ,text):
        """Segment with jieba, then wordpiece each segment."""
        output_tokens = []
        for x in jieba.cut(text ,cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self ,token_ids ,**kwargs):
        """Drop negative ids and pad/eos/bos tokens before decoding."""
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids ,**kwargs)

    def check(self ,token):
        """True when `token` is in the base vocabulary."""
        return token in self.encoder

    def convert_tokens_to_string(self ,tokens):
        return "".join(tokens)

    def _convert_token_to_id(self ,token):
        return self.encoder.get(token ,self.encoder.get(self.unk_token))

    def _convert_id_to_token(self ,index):
        return self.decoder.get(index ,self.unk_token)

    def save_vocabulary(self ,save_directory ,filename_prefix=None):
        """Write the vocabulary back to disk, one token per line."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        else:
            vocab_file = (filename_prefix + '-' if filename_prefix else '') + save_directory
        index = 0
        # restore the on-disk representation of space/newline before writing
        if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() ,key=lambda x: x[1]))
        with open(vocab_file ,'w' ,encoding='utf-8') as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!')
                    index = token_index
                writer.write(token + '\n')
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self ,token_ids_a ,token_ids_b=None):
        """Prefix each sequence with the BOS token."""
        if token_ids_b is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_b

    def get_special_tokens_mask(self ,token_ids_a ,token_ids_b=None ,already_has_special_tokens=False):
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a ,token_ids_1=token_ids_b ,already_has_special_tokens=True)
        if token_ids_b is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b))
        return [1] + ([0] * len(token_ids_a))
| 652 | 1 |
import math
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCamelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_0_1 ) -> int:
    """Return the nth prime number (Project Euler problem 7).

    Fixes the original, whose locals were clobbered into one name so the loop
    measured `len()` of the argument and appended the argument itself.
    NOTE(review): relies on the sibling `is_prime` helper defined above —
    confirm that function's (obfuscated) name.
    """
    try:
        nth = int(lowerCamelCase__)
    except (TypeError, ValueError):
        raise TypeError('Parameter nth must be int or castable to int.') from None
    if nth <= 0:
        raise ValueError('Parameter nth must be greater than or equal to one.')
    primes: list[int] = []
    candidate = 2
    while len(primes) < nth:
        if is_prime(candidate):
            primes.append(candidate)
        candidate += 1
    return primes[-1]
if __name__ == "__main__":
    # Print the default (10001st) prime when run as a script.
    # NOTE(review): `solution` is presumably the function above (obfuscated to
    # SCREAMING_SNAKE_CASE__) — confirm the name.
    print(F"""{solution() = }""")
| 652 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy import structure: configuration is always available, modeling classes
# only when torch is installed.
# NOTE(review): both structures are bound to the obfuscated name `a`, yet the
# _LazyModule call below reads `_import_structure` — confirm the original name.
a ={"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a =["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
    # Static type checkers see the real imports...
    from .configuration_mmbt import MMBTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    # ...while at runtime the module is replaced by a lazy loader.
    import sys
    a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 | 1 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__=False ) -> Optional[Any]:
try:
__lowerCamelCase : Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
__lowerCamelCase : Optional[Any] = default
else:
# KEY is set, convert it to True or False.
try:
__lowerCamelCase : Optional[Any] = strtobool(lowerCamelCase__ )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"If set, {key} must be yes or no." )
return _value
# Module flag: slow tests run only when RUN_SLOW=yes in the environment.
# NOTE(review): the value is bound to the obfuscated name `a`, but the `slow`
# decorator below reads `_run_slow_tests` — confirm the original name.
a =parse_flag_from_env("""RUN_SLOW""", default=False)
# NOTE(review): every decorator factory below shares one obfuscated name, so
# at module scope only the last definition survives; upstream these are
# skip/slow/require_cpu/require_cuda/... — confirm against accelerate.
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
    # unconditionally skip the decorated test
    return unittest.skip('Test was skipped' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
    # run only when the slow-test flag is enabled
    return unittest.skipUnless(_run_slow_tests , 'test is slow' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
    # require a CPU-only environment
    return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
    # require at least one CUDA GPU
    return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    # require an Intel XPU device
    return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
    # require Apple `mps` backend support in torch
    return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
    # require both transformers and datasets
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    # require bitsandbytes
    return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
    # require a TPU
    return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    # require exactly one CUDA GPU
    return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    # require exactly one XPU
    return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[int]:
    # require more than one CUDA GPU
    return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
    # require more than one XPU
    return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
    # require safetensors
    return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[Any]:
    # require DeepSpeed
    return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
    # require torch >= 1.12.0
    return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( test_case=None , version=None ) -> Optional[int]:
    """Skip unless torch >= `version`; usable bare or as `@deco(version=...)`.

    Fixes the original duplicate parameter name (SyntaxError). When called
    without a test case it returns a partially-applied decorator.
    """
    if test_case is None:
        # decorator-factory form: bind `version` and wait for the test case
        return partial(SCREAMING_SNAKE_CASE__ , version=version)
    return unittest.skipUnless(is_torch_version('>=' , version) , F"test requires torch version >= {version}")(test_case)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Union[str, Any]:
    # require Tensorboard
    return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Dict:
    # require wandb
    return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    # require comet_ml
    return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(lowerCamelCase__ )
# True when a non-comet tracker (wandb/tensorboard) is usable.
# NOTE(review): bound to the obfuscated name `a`, but the decorator below reads
# `_atleast_one_tracker_available` — confirm the original name.
a =(
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
    # require at least one tracker, and comet_ml must NOT be installed
    return unittest.skipUnless(
        _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(lowerCamelCase__ )
class A_ ( unittest.TestCase ):
    """TestCase with a class-scoped temporary directory, emptied before each test.

    NOTE(review): the original clobbered `cls.tmpdir`/`clear_on_setup` into
    throwaway locals (AttributeError at runtime) and obfuscated the unittest
    hook names; restored to `setUpClass`/`tearDownClass`/`setUp` so the
    framework actually calls them — confirm against upstream accelerate.
    """
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        # one shared temp dir for the whole class
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        # remove the shared temp dir after the last test
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        # wipe the temp dir's contents before each test (opt out via clear_on_setup)
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob('**/*'):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)
class A_ ( unittest.TestCase ):
    """TestCase that resets accelerate's global state after each test."""
    # NOTE(review): this looks like it is meant to be `tearDown`; with the
    # obfuscated name unittest never invokes it — confirm against upstream.
    def lowerCAmelCase ( self : int):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()
class A_ ( unittest.TestCase ):
    """TestCase helper that starts mock patchers and registers their cleanup."""

    def lowerCAmelCase ( self ,SCREAMING_SNAKE_CASE__):
        # Store on the instance (the original assigned a local, leaving
        # `self.mocks` undefined) and normalize a single mock to a list.
        self.mocks = SCREAMING_SNAKE_CASE__ if isinstance(SCREAMING_SNAKE_CASE__ ,(tuple, list)) else [SCREAMING_SNAKE_CASE__]
        for m in self.mocks:
            m.start()
            # Guarantee every started patcher is stopped when the test ends.
            self.addCleanup(m.stop)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Optional[Any]:
    """Return True iff `lowerCamelCase__` holds identical values on all processes.

    The original referenced an undefined `tensor`; the parameter is now used.
    """
    state = AcceleratorState()
    # Add a leading batch dim and move to the current device before gathering.
    local_tensor = lowerCamelCase__[None].clone().to(state.device )
    gathered = gather(local_tensor ).cpu()
    reference = local_tensor[0].cpu()
    for i in range(gathered.shape[0] ):
        if not torch.equal(gathered[i] , reference ):
            return False
    return True
class A_ :
    """Result of a finished subprocess: `returncode`, `stdout`, `stderr`.

    The original used duplicate parameter names (a SyntaxError) and assigned
    locals; callers below read `result.returncode` / `result.stderr`, so the
    attributes are restored.
    """

    def __init__( self ,returncode ,stdout ,stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
while True:
__lowerCamelCase : List[Any] = await stream.readline()
if line:
callback(lowerCamelCase__ )
else:
break
async def SCREAMING_SNAKE_CASE__ ( cmd , env=None , stdin=None , timeout=None , quiet=False , echo=False ) -> _RunOutput:
    """Run `cmd` asynchronously, teeing its stdout/stderr line-by-line.

    Returns a `_RunOutput` with the return code and the captured (decoded)
    stdout/stderr lines.  The original duplicated every parameter name
    (a SyntaxError); the names are restored from the body's references.
    """
    if echo:
        print('\nRunning: ' , ' '.join(cmd ) )
    p = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=stdin , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=env , )
    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)
    out = []
    err = []

    def tee(line , sink , pipe , label="" ):
        # Decode, strip the trailing newline, record, and optionally echo.
        line = line.decode('utf-8' ).rstrip()
        sink.append(line )
        if not quiet:
            print(label , line , file=pipe )

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout , lambda l : tee(l , out , sys.stdout , label='stdout:' ) ) ),
            asyncio.create_task(_read_stream(p.stderr , lambda l : tee(l , err , sys.stderr , label='stderr:' ) ) ),
        ] , timeout=timeout , )
    return _RunOutput(await p.wait() , out , err )
def SCREAMING_SNAKE_CASE__ ( cmd , env=None , stdin=None , timeout=1_8_0 , quiet=False , echo=True ) -> _RunOutput:
    """Run `cmd` via `_stream_subprocess` and raise on a non-zero exit code.

    The original duplicated parameter names (a SyntaxError) and referenced the
    undefined `result`/`cmd_str`/`stderr`; restored here.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd , env=env , stdin=stdin , timeout=timeout , quiet=quiet , echo=echo ) )
    cmd_str = ' '.join(cmd )
    if result.returncode > 0:
        stderr = '\n'.join(result.stderr )
        raise RuntimeError(
            F"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            F"The combined stderr from workers follows:\n{stderr}" )
    return result
class A_ ( SCREAMING_SNAKE_CASE ):
    """Exception raised when a command run by the subprocess helper below fails."""
    # NOTE(review): base class `SCREAMING_SNAKE_CASE` is presumably `Exception`
    # (it is raised with a message string below) — confirm at the definition site.
    pass
def SCREAMING_SNAKE_CASE__ ( cmd , return_stdout=False ) -> str:
    """Run `cmd` with subprocess, optionally returning its decoded stdout.

    Raises `SubprocessCallException` (wrapping the original error) on failure.
    The original duplicated the parameter name (a SyntaxError) and referenced
    the undefined `return_stdout`/`output`; restored here.
    """
    try:
        output = subprocess.check_output(cmd , stderr=subprocess.STDOUT )
        if return_stdout:
            if hasattr(output , 'decode' ):
                output = output.decode('utf-8' )
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            F"Command `{' '.join(cmd )}` failed with the following error:\n\n{e.output.decode()}" ) from e
| 652 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_ ( SCREAMING_SNAKE_CASE ):
    """Unit tests for `UnCLIPScheduler`.

    The scrambled original gave every method the same name (so only the last
    one survived) and dropped the `scheduler_classes` attribute and the
    `get_scheduler_config` helper that the bodies reference; coherent names
    are restored here.  Exact test-method names are reconstructed — confirm
    against the upstream diffusers test file.
    """

    # Read by the common-test base class and by the methods below.
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, updated with `kwargs`."""
        config = {
            'num_train_timesteps': 1_0_0_0,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 1_0, 2_0]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 5_0_0, 9_9_9]:
            for prev_timestep in [None, 5, 1_0_0, 2_5_0, 5_0_0, 7_5_0]:
                # prev_timestep must precede time_step to be meaningful.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log')
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.00_00E-10)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(4_8_7) - 0.0549625)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(9_9_9) - 0.9994987)) < 1E-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range')
        scheduler = scheduler_class(**scheduler_config)
        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1E-5
        assert scheduler._get_variance(4_8_7, predicted_variance=predicted_variance) - -5.7998052 < 1E-5
        assert scheduler._get_variance(9_9_9, predicted_variance=predicted_variance) - -0.0010011 < 1E-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 252.2682495) < 1E-2
        assert abs(result_mean.item() - 0.3284743) < 1E-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(2_5)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.2044983) < 1E-2
        assert abs(result_mean.item() - 0.3362038) < 1E-3

    def test_trained_betas(self):
        # Not applicable to UnCLIPScheduler; overridden as a no-op.
        pass

    def test_add_noise_device(self):
        # Not applicable to UnCLIPScheduler; overridden as a no-op.
        pass
| 652 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
a = logging.get_logger(__name__)

# The scrambled original bound every constant below to `a`, each assignment
# clobbering the previous one; the tokenizer class reads these names.
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt"""
        ),
        """squeezebert/squeezebert-mnli""": """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt""",
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt"""
        ),
    },
    """tokenizer_file""": {
        """squeezebert/squeezebert-uncased""": (
            """https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json"""
        ),
        """squeezebert/squeezebert-mnli-headless""": (
            """https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json"""
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """squeezebert/squeezebert-uncased""": 512,
    """squeezebert/squeezebert-mnli""": 512,
    """squeezebert/squeezebert-mnli-headless""": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    """squeezebert/squeezebert-uncased""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli""": {"""do_lower_case""": True},
    """squeezebert/squeezebert-mnli-headless""": {"""do_lower_case""": True},
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Fast SqueezeBERT tokenizer (backed by HuggingFace `tokenizers`).

    The scrambled original duplicated parameter names, assigned locals where
    dict entries / attributes were intended (so the normalizer was never
    updated), and gave three different methods the same name; restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = SqueezeBertTokenizer

    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token="[UNK]" ,sep_token="[SEP]" ,pad_token="[PAD]" ,cls_token="[CLS]" ,mask_token="[MASK]" ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ,):
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        # Rebuild the backend normalizer if its options diverge from the args.
        if (
            normalizer_state.get('lowercase' ,do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None):
        """Build `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]` input ids."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self ,token_ids_0 ,token_ids_1=None):
        """Return token-type ids: 0s for the first segment, 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary( self ,save_directory ,filename_prefix=None):
        """Save the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix)
        return tuple(files)
| 652 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)

# Renamed: in the scrambled original this second `a = ...` clobbered the logger.
SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """caidas/swin2sr-classicalsr-x2-64""": (
        """https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
    ),
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class for Swin2SR models.

    The scrambled original duplicated every `__init__` parameter name
    (a SyntaxError) and assigned locals instead of instance attributes;
    the parameter and attribute names are restored from the body's references.
    """

    model_type = '''swin2sr'''
    attribute_map = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__( self ,image_size=6_4 ,patch_size=1 ,num_channels=3 ,embed_dim=1_8_0 ,depths=[6, 6, 6, 6, 6, 6] ,num_heads=[6, 6, 6, 6, 6, 6] ,window_size=8 ,mlp_ratio=2.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,initializer_range=0.02 ,layer_norm_eps=1E-5 ,upscale=2 ,img_range=1.0 ,resi_connection="1conv" ,upsampler="pixelshuffle" ,**kwargs ,):
        # NOTE(review): list defaults kept for interface compatibility with the
        # upstream config; they are never mutated here.
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 652 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a = logging.get_logger(__name__)

# Renamed: in the scrambled original this second `a = ...` clobbered the logger.
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
    """t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
    """t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
    """t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
    """t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class for T5 models.

    The scrambled original duplicated every `__init__` parameter name
    (a SyntaxError) and assigned locals instead of instance attributes;
    names are restored from the body's references.
    """

    model_type = '''t5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self ,vocab_size=3_2_1_2_8 ,d_model=5_1_2 ,d_kv=6_4 ,d_ff=2_0_4_8 ,num_layers=6 ,num_decoder_layers=None ,num_heads=8 ,relative_attention_num_buckets=3_2 ,relative_attention_max_distance=1_2_8 ,dropout_rate=0.1 ,layer_norm_epsilon=1E-6 ,initializer_factor=1.0 ,feed_forward_proj="relu" ,is_encoder_decoder=True ,use_cache=True ,pad_token_id=0 ,eos_token_id=1 ,**kwargs ,):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        # Parse `"gated-gelu"`-style activation specs.
        act_info = self.feed_forward_proj.split('-')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == 'gated'
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                F"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
                '\'gated-gelu\' or \'relu\'')
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = 'gelu_new'
        super().__init__(
            pad_token_id=pad_token_id ,eos_token_id=eos_token_id ,is_encoder_decoder=is_encoder_decoder ,**kwargs ,)
class A_ ( SCREAMING_SNAKE_CASE ):
    """ONNX export configuration for T5 (seq2seq with optional past key values).

    The scrambled original gave both properties the same name (only the last
    survived) and assigned locals where dict entries were intended, so the
    inputs mapping was never populated; restored here.
    """

    @property
    def inputs(self):
        common_inputs = {
            'input_ids': {0: 'batch', 1: 'encoder_sequence'},
            'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
        }
        if self.use_past:
            # With a cache, the attention mask covers past + current tokens
            # and the decoder consumes a single new token per step.
            common_inputs['attention_mask'][1] = 'past_encoder_sequence + sequence'
            common_inputs['decoder_input_ids'] = {0: 'batch'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
            common_inputs['decoder_attention_mask'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs ,direction='inputs')
        return common_inputs

    @property
    def default_onnx_opset(self):
        return 1_3
| 652 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE__ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ) -> Dict:
    """Tokenize a single `line`, padding/truncating to `max_length`.

    The original duplicated every parameter name (a SyntaxError); names are
    restored.  BART tokenizers get `add_prefix_space` when the line does not
    already start with a space.
    """
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    # Mutate the tokenizer's padding side for this call (original assigned a local).
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def SCREAMING_SNAKE_CASE__ ( input_ids , pad_token_id , attention_mask=None , ) -> List[str]:
    """Drop columns that are entirely `pad_token_id` from a padded batch.

    The original duplicated parameter names (a SyntaxError); restored here.
    Returns trimmed `input_ids`, or a `(input_ids, attention_mask)` pair when
    a mask is supplied.
    """
    # Keep a column if any row has a non-pad token in it.
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_ ( SCREAMING_SNAKE_CASE ):
    """Line-oriented seq2seq dataset reading `<type_path>.source` / `.target`.

    The scrambled original duplicated `__init__` parameter names (a
    SyntaxError), assigned locals where instance attributes were intended,
    and gave two different methods the same name; names are restored from
    the body's references (`self.src_file`, `self.get_char_lens`, ...).
    """

    def __init__( self ,tokenizer ,data_dir ,max_source_length ,max_target_length ,type_path="train" ,n_obs=None ,src_lang=None ,tgt_lang=None ,prefix="" ,):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + '.source')
        self.tgt_file = Path(data_dir).joinpath(type_path + '.target')
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__( self):
        return len(self.src_lens)

    def __getitem__( self ,index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file) ,index).rstrip('\n')
        tgt_line = linecache.getline(str(self.tgt_file) ,index).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,TaTokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right; RAG tokenizers expose separate
        # question-encoder / generator tokenizers.
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer ,RagTokenizer) else self.tokenizer
        source_inputs = encode_line(source_tokenizer ,source_line ,self.max_source_length ,'right')
        target_inputs = encode_line(target_tokenizer ,tgt_line ,self.max_target_length ,'right')
        source_ids = source_inputs['input_ids'].squeeze()
        target_ids = target_inputs['input_ids'].squeeze()
        src_mask = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        """Return the character length of each line in `data_file`.

        The original computed `len(<path>)` for every line; fixed to `len(x)`.
        """
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn( self ,batch):
        """Stack per-example dicts into batched tensors and trim padding columns."""
        input_ids = torch.stack([x['input_ids'] for x in batch])
        masks = torch.stack([x['attention_mask'] for x in batch])
        target_ids = torch.stack([x['decoder_input_ids'] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids ,tgt_pad_token_id)
        source_ids , source_mask = trim_batch(input_ids ,src_pad_token_id ,attention_mask=masks)
        batch = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
a =getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    """Flatten one level of nesting: a list of lists becomes a single list."""
    flattened = itertools.chain.from_iterable(lowerCamelCase__ )
    return list(flattened)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Write the current git repo info to `<folder>/git_log.json`.

    The original serialized the folder argument instead of the collected
    repo info; fixed to save `repo_infos`.
    """
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(lowerCamelCase__ , 'git_log.json' ) )
def SCREAMING_SNAKE_CASE__ ( content , path , indent=4 , **json_dump_kwargs ) -> List[str]:
    """Serialize `content` as JSON to `path`.

    The original duplicated parameter names (a SyntaxError); restored here.
    """
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    """Parse and return the JSON content of the file at `lowerCamelCase__`."""
    with open(lowerCamelCase__ ) as handle:
        parsed = json.load(handle )
    return parsed
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
    """Return a dict describing the enclosing git repo (id, sha, branch, host).

    The original passed an undefined name to `search_parent_directories` and
    stringified it as the repo id; both fixed.
    """
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def SCREAMING_SNAKE_CASE__ ( fn , iterable ) -> List:
    """`list(map(...))` helper.  The original duplicated parameter names."""
    return list(map(fn , iterable ) )
def SCREAMING_SNAKE_CASE__ ( obj , path ) -> Optional[int]:
    """Pickle `obj` to the file at `path`.

    The original duplicated parameter names (a SyntaxError); restored here.
    """
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Lowercase text and strip punctuation, English articles, and extra spaces.

    The inner helpers referenced an undefined `text` instead of their own
    parameter; fixed here.  This is the standard SQuAD answer normalization.
    """

    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ) -> int:
    """Token-level F1 between a prediction and a reference (SQuAD-style).

    The original duplicated parameter names (a SyntaxError); restored here.
    Returns 0 when the normalized token bags share nothing.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    # Multiset intersection of token counts.
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ) -> Dict:
    """True iff prediction and reference are equal after normalization.

    The original duplicated parameter names (a SyntaxError); restored here.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def SCREAMING_SNAKE_CASE__ ( output_lns , reference_lns ) -> Dict:
    """Average exact-match score over paired output/reference lines.

    The original duplicated parameter names (a SyntaxError); restored here.
    Returns `{"em": <float in [0, 1]>}` (0 for empty input).
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
    """Return True when the model prefix names a RAG model.

    The original referenced the undefined `model_prefix`; the parameter is used.
    """
    return lowerCamelCase__.startswith('rag' )
def SCREAMING_SNAKE_CASE__ ( extra_params , hparams , config ) -> Optional[Any]:
    """Move truthy `extra_params` from `hparams` onto `config`.

    Params missing on the config under both their own name and the T5
    equivalent (`dropout` -> `dropout_rate`) are logged and dropped.
    The original duplicated parameter names (a SyntaxError) and assigned a
    local where a dict entry was intended; restored here.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 652 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Import PIL's Image when vision deps are installed; otherwise install a
# lightweight stub so tests referencing `Image.open` can still be collected.
if is_vision_available():
    from PIL import Image
else:
    class A_ :
        # Stub standing in for PIL's Image (presumably aliased to `Image`
        # elsewhere — TODO confirm); accepts any arguments and does nothing.
        @staticmethod
        def lowerCAmelCase ( *SCREAMING_SNAKE_CASE__ : Union[str, Any] ,**SCREAMING_SNAKE_CASE__ : List[Any]):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A_ ( unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[str]):
__lowerCamelCase : List[str] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
__lowerCamelCase : Union[str, Any] = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' ,threshold=0.0)
self.assertGreater(len(SCREAMING_SNAKE_CASE__) ,0)
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE__ ,{
'score': ANY(SCREAMING_SNAKE_CASE__),
'label': ANY(SCREAMING_SNAKE_CASE__),
'box': {'xmin': ANY(SCREAMING_SNAKE_CASE__), 'ymin': ANY(SCREAMING_SNAKE_CASE__), 'xmax': ANY(SCREAMING_SNAKE_CASE__), 'ymax': ANY(SCREAMING_SNAKE_CASE__)},
} ,)
import datasets
__lowerCamelCase : Optional[int] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' ,'image' ,split='test')
__lowerCamelCase : List[str] = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__lowerCamelCase : List[str] = object_detector(SCREAMING_SNAKE_CASE__ ,threshold=0.0)
self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,len(SCREAMING_SNAKE_CASE__))
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE__) ,0)
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE__ ,{
'score': ANY(SCREAMING_SNAKE_CASE__),
'label': ANY(SCREAMING_SNAKE_CASE__),
'box': {'xmin': ANY(SCREAMING_SNAKE_CASE__), 'ymin': ANY(SCREAMING_SNAKE_CASE__), 'xmax': ANY(SCREAMING_SNAKE_CASE__), 'ymax': ANY(SCREAMING_SNAKE_CASE__)},
} ,)
@require_tf
@unittest.skip('Object detection not implemented in TF')
def lowerCAmelCase ( self : Optional[Any]):
pass
@require_torch
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : str = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__lowerCamelCase : Optional[Any] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=0.0)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
] ,)
__lowerCamelCase : List[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] ,threshold=0.0 ,)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_5_9, 'ymin': 1_2_0, 'xmax': 4_8_0, 'ymax': 3_5_9}},
],
] ,)
@require_torch
@slow
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : str = 'facebook/detr-resnet-50'
__lowerCamelCase : Any = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : int = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Optional[Any] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE__ ,feature_extractor=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
] ,)
__lowerCamelCase : Optional[int] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
] ,)
@require_torch
@slow
def lowerCAmelCase ( self : Dict):
__lowerCamelCase : Dict = 'facebook/detr-resnet-50'
__lowerCamelCase : str = pipeline('object-detection' ,model=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg')
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
] ,)
__lowerCamelCase : Tuple = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
])
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 4_0, 'ymin': 7_0, 'xmax': 1_7_5, 'ymax': 1_1_7}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_3_3, 'ymin': 7_2, 'xmax': 3_6_8, 'ymax': 1_8_7}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_3_9, 'ymax': 4_7_3}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
],
] ,)
@require_torch
@slow
def lowerCAmelCase ( self : List[str]):
__lowerCamelCase : List[str] = 0.9985
__lowerCamelCase : List[str] = 'facebook/detr-resnet-50'
__lowerCamelCase : str = pipeline('object-detection' ,model=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' ,threshold=SCREAMING_SNAKE_CASE__)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 1_3, 'ymin': 5_2, 'xmax': 3_1_4, 'ymax': 4_7_0}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_4_5, 'ymin': 2_3, 'xmax': 6_4_0, 'ymax': 3_6_8}},
] ,)
@require_torch
@require_pytesseract
@slow
def lowerCAmelCase ( self : str):
__lowerCamelCase : Union[str, Any] = 'Narsil/layoutlmv3-finetuned-funsd'
__lowerCamelCase : Dict = 0.9993
__lowerCamelCase : Optional[Any] = pipeline('object-detection' ,model=SCREAMING_SNAKE_CASE__ ,threshold=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Union[str, Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png')
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE__ ,decimals=4) ,[
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_9_4, 'ymin': 2_5_4, 'xmax': 3_4_3, 'ymax': 2_6_4}},
] ,)
| 652 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
logger = logging.get_logger(__name__)

# General docstring — these names are referenced by the docstring decorators below;
# the obfuscated version rebound them all to `a`, leaving every reference undefined.
_CONFIG_FOR_DOC = "MobileNetV1Config"

# Base docstring
_CHECKPOINT_FOR_DOC = "google/mobilenet_v1_1.0_224"
_EXPECTED_OUTPUT_SHAPE = [1, 1024, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "google/mobilenet_v1_1.0_224"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "google/mobilenet_v1_1.0_224",
    "google/mobilenet_v1_0.75_192",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str:
__lowerCamelCase : str = {}
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : int = model.mobilenet_va
else:
__lowerCamelCase : List[str] = model
__lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/'
__lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight
__lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias
__lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight
__lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean
__lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var
for i in range(1_3 ):
__lowerCamelCase : Any = i + 1
__lowerCamelCase : Union[str, Any] = i * 2
__lowerCamelCase : Optional[Any] = backbone.layer[pt_index]
__lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
__lowerCamelCase : Tuple = pointer.convolution.weight
__lowerCamelCase : Optional[Any] = pointer.normalization.bias
__lowerCamelCase : Union[str, Any] = pointer.normalization.weight
__lowerCamelCase : List[str] = pointer.normalization.running_mean
__lowerCamelCase : Union[str, Any] = pointer.normalization.running_var
__lowerCamelCase : int = backbone.layer[pt_index + 1]
__lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
__lowerCamelCase : Optional[Any] = pointer.convolution.weight
__lowerCamelCase : Any = pointer.normalization.bias
__lowerCamelCase : str = pointer.normalization.weight
__lowerCamelCase : Dict = pointer.normalization.running_mean
__lowerCamelCase : List[str] = pointer.normalization.running_var
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
__lowerCamelCase : Any = model.classifier.weight
__lowerCamelCase : int = model.classifier.bias
return tf_to_pt_map
def load_tf_weights_in_mobilenet_va(model, config, tf_checkpoint_path):
    """Load a TensorFlow MobileNetV1 checkpoint into a PyTorch model in-place.

    Restores the locals the obfuscation destroyed: the `tf_weights` dict was
    never populated and copied arrays were never written into the parameters.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise

    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_checkpoint_path)
    tf_weights = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_checkpoint_path, name)
        tf_weights[name] = array

    # Build TF to PyTorch weights loading map
    tf_to_pt_map = _build_tf_to_pytorch_map(model, config, tf_weights)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f"Importing {name}")
        if name not in tf_weights:
            logger.info(f"{name} not in tf pre-trained weights, skipping")
            continue
        array = tf_weights[name]

        # TF stores depthwise kernels as HWIO-with-multiplier; PyTorch wants OIHW.
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise')
            array = np.transpose(array, (2, 3, 0, 1))
        elif "weights" in name:
            logger.info('Transposing')
            if len(pointer.shape) == 2:  # copying into linear layer
                array = array.squeeze().transpose()
            else:
                array = np.transpose(array, (3, 2, 0, 1))

        if pointer.shape != array.shape:
            raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")

        logger.info(f"Initialize PyTorch weight {name} {array.shape}")
        pointer.data = torch.from_numpy(array)

        # Drop the variable plus its optimizer/EMA shadows so the leftover report
        # below only lists genuinely uncopied weights.
        tf_weights.pop(name, None)
        tf_weights.pop(name + '/RMSProp', None)
        tf_weights.pop(name + '/RMSProp_1', None)
        tf_weights.pop(name + '/ExponentialMovingAverage', None)

    logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}")
    return model
def apply_tf_padding(features, conv_layer):
    """Zero-pad `features` the way TensorFlow's "SAME" padding would for `conv_layer`.

    Args:
        features: input tensor whose last two dims are (height, width).
        conv_layer: a Conv2d-like module exposing `.stride` and `.kernel_size`.

    Returns:
        torch.Tensor: the padded features.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size

    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height, 0)
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height), 0)

    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width, 0)
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width), 0)

    # TF splits uneven padding with the extra pixel on the bottom/right.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top

    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features, padding, 'constant', 0.0)
class MobileNetVaConvLayer(nn.Module):
    """Conv2d + optional BatchNorm + optional activation building block for MobileNetV1.

    Fixes the non-existent `nn.Convad`/`nn.BatchNormad` names, the duplicated
    parameter names (a SyntaxError), and renames the forward hook to `forward`
    so `nn.Module.__call__` actually dispatches to it.
    """

    def __init__(
        self,
        config,
        in_channels: int,
        out_channels: int,
        kernel_size: int,
        stride: int = 1,
        groups: int = 1,
        bias: bool = False,
        use_normalization: bool = True,
        use_activation=True,
    ) -> None:
        super().__init__()
        self.config = config

        if in_channels % groups != 0:
            raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.")

        # With TF-style padding the explicit pad happens in forward() instead.
        padding = 0 if config.tf_padding else int((kernel_size - 1) / 2)

        self.convolution = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=bias,
            padding_mode='zeros',
        )

        if use_normalization:
            self.normalization = nn.BatchNorm2d(
                num_features=out_channels,
                eps=config.layer_norm_eps,
                momentum=0.9997,
                affine=True,
                track_running_stats=True,
            )
        else:
            self.normalization = None

        if use_activation:
            # `use_activation` may be True (fall back to config.hidden_act) or an
            # activation name string; config.hidden_act may itself be a callable.
            if isinstance(use_activation, str):
                self.activation = ACTaFN[use_activation]
            elif isinstance(config.hidden_act, str):
                self.activation = ACTaFN[config.hidden_act]
            else:
                self.activation = config.hidden_act
        else:
            self.activation = None

    def forward(self, features: "torch.Tensor") -> "torch.Tensor":
        if self.config.tf_padding:
            features = apply_tf_padding(features, self.convolution)
        features = self.convolution(features)
        if self.normalization is not None:
            features = self.normalization(features)
        if self.activation is not None:
            features = self.activation(features)
        return features
class MobileNetVaPreTrainedModel(PreTrainedModel):
    """Abstract base class handling weight init and pretrained checkpoint loading.

    The obfuscated version inherited from the undefined `SCREAMING_SNAKE_CASE`
    and collapsed every class attribute to `_UpperCAmelCase`; the conventional
    `PreTrainedModel` hook names are restored so the framework can find them.
    """

    config_class = MobileNetVaConfig
    load_tf_weights = load_tf_weights_in_mobilenet_va
    base_model_prefix = 'mobilenet_v1'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = False

    def _init_weights(self, module) -> None:
        """Initialize weights: normal for conv/linear, zero bias, unit batch-norm."""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.BatchNorm2d):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
# Docstring fragments consumed by the @add_start_docstrings decorators below;
# the obfuscation rebound both to `a`, leaving the decorator references undefined.
MOBILENET_V1_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILENET_V1_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaModel(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone: a conv stem plus 13 depthwise/pointwise layer pairs."""

    def __init__(self, config, add_pooling_layer: bool = True):
        super().__init__(config)
        self.config = config

        depth = 32
        out_channels = max(int(depth * config.depth_multiplier), config.min_depth)
        self.conv_stem = MobileNetVaConvLayer(
            config,
            in_channels=config.num_channels,
            out_channels=out_channels,
            kernel_size=3,
            stride=2,
        )

        # Stride pattern of the 13 depthwise blocks; a stride-2 block (or the
        # first block) doubles the channel depth.
        strides = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        self.layer = nn.ModuleList()
        for i in range(13):
            in_channels = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                out_channels = max(int(depth * config.depth_multiplier), config.min_depth)

            # Depthwise 3x3 convolution (groups == channels) ...
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=in_channels,
                    kernel_size=3,
                    stride=strides[i],
                    groups=in_channels,
                ))
            # ... followed by a pointwise 1x1 projection.
            self.layer.append(
                MobileNetVaConvLayer(
                    config,
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                ))

        self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def _prune_heads(self, heads_to_prune):
        # MobileNetV1 has no attention heads to prune.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')

        hidden_states = self.conv_stem(pixel_values)

        all_hidden_states = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            hidden_states = layer_module(hidden_states)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        last_hidden_state = hidden_states

        if self.pooler is not None:
            pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
        else:
            pooled_output = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=all_hidden_states,
        )
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    MOBILENET_V1_START_DOCSTRING,
)
class MobileNetVaForImageClassification(MobileNetVaPreTrainedModel):
    """MobileNetV1 backbone plus a dropout + linear classification head."""

    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.mobilenet_va = MobileNetVaModel(config)

        last_hidden_size = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        self.dropout = nn.Dropout(config.classifier_dropout_prob, inplace=True)
        self.classifier = nn.Linear(last_hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILENET_V1_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, output_hidden_states=None, labels=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilenet_va(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(self.dropout(pooled_output))

        loss = None
        if labels is not None:
            # Infer the problem type once from the label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
        )
| 652 | 1 |
from scipy.stats import pearsonr
import datasets
a ="""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
a ="""
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
a ="""
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    """Pearson correlation metric backed by `scipy.stats.pearsonr`.

    Restores the `datasets.Metric` hook names (`_info`, `_compute`), which the
    obfuscation had collapsed to a single duplicated `lowerCAmelCase`, and the
    `results` local that `_compute` read without ever assigning.
    """

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('float'),
                    'references': datasets.Value('float'),
                }
            ),
            reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
| 652 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Return `img` warped by the affine transform mapping triangle `pts1` onto `pts2`.

    The original signature repeated one parameter name five times (a SyntaxError).
    """
    matrix = cva.getAffineTransform(pts1, pts2)
    return cva.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape (the obfuscated version bound both to `a`, so the
    # `img_rows`/`img_cols` used below were undefined)
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image (np.floataa was not a valid dtype)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 652 | 1 |
from __future__ import annotations
def find_max(nums, left, right):
    """Return the maximum of ``nums[left] .. nums[right]`` by divide and conquer.

    Negative indices within ``[-len(nums), len(nums))`` are accepted.
    The original signature repeated one parameter name (a SyntaxError) and the
    recursive calls referenced the then-undefined name ``find_max``.

    Raises:
        ValueError: if ``nums`` is empty.
        IndexError: if either bound falls outside the valid index range.
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]

    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # max in range [left, mid]
    right_max = find_max(nums, mid + 1, right)  # max in range [mid + 1, right]
    return left_max if left_max >= right_max else right_max
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod(verbose=True)
| 652 |
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Restores the locals (`proth_list`, `proth_index`, `increment`) that the
    obfuscation left undefined, making the loop body crash.

    Raises:
        TypeError: if `number` is not an integer.
        ValueError: if `number` < 1.
    """
    if not isinstance(number, int):
        error_message = f"Input value of [number={number}] must be an integer"
        raise TypeError(error_message)

    if number < 1:
        error_message = f"Input value of [number={number}] must be > 0"
        raise ValueError(error_message)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Proth numbers arrive in blocks whose size doubles each time; compute
        # how many blocks are needed to reach the requested index.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

    return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first few Proth numbers; index 0 exercises the ValueError path.
    # The obfuscated version assigned the result to `a` but printed `value`.
    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"""ValueError: there is no {number}th Proth number""")
            continue

        print(f"""The {number}th Proth number: {value}""")
| 652 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Test suite for MgpstrProcessor (character tokenizer + ViT image
    processor): save/load round-trips, preprocessing parity, decoding, and
    model input names.

    NOTE(review): names were machine-obfuscated — every method is called
    `lowerCAmelCase` and locals are `__lowerCamelCase`, so later statements
    reference names (e.g. `processor`, `tokenizer`) that the obfuscated
    assignments no longer bind. Confirm against the original test file.
    """

    # Image-processor class under test (None when vision deps are missing).
    _UpperCAmelCase : Optional[int] = ViTImageProcessor if is_vision_available() else None

    @property
    def lowerCAmelCase ( self : Optional[Any]):
        # NOTE(review): `image_processor_tester` is never assigned in this
        # class — presumably created in a setUp lost to obfuscation; verify.
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : List[str]):
        # Set-up: write a character-level vocab file and an image-processor
        # JSON config into a temporary directory for from_pretrained.
        __lowerCamelCase : Dict = (3, 3_2, 1_2_8)
        __lowerCamelCase : List[str] = tempfile.mkdtemp()
        # fmt: off
        __lowerCamelCase : int = ['[GO]', '[s]', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
        # fmt: on
        __lowerCamelCase : Dict = dict(zip(SCREAMING_SNAKE_CASE__ ,range(len(SCREAMING_SNAKE_CASE__))))
        __lowerCamelCase : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'])
        with open(self.vocab_file ,'w' ,encoding='utf-8') as fp:
            fp.write(json.dumps(SCREAMING_SNAKE_CASE__) + '\n')
        # Image-processor config mirroring ViTImageProcessor defaults for MGP-STR.
        __lowerCamelCase : int = {
            'do_normalize': False,
            'do_resize': True,
            'image_processor_type': 'ViTImageProcessor',
            'resample': 3,
            'size': {'height': 3_2, 'width': 1_2_8},
        }
        __lowerCamelCase : Any = os.path.join(self.tmpdirname ,SCREAMING_SNAKE_CASE__)
        with open(self.image_processor_file ,'w' ,encoding='utf-8') as fp:
            json.dump(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any ,**SCREAMING_SNAKE_CASE__ : Any):
        # Load the tokenizer written by the set-up method above.
        return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[int] ,**SCREAMING_SNAKE_CASE__ : Dict):
        # Load the image processor written by the set-up method above.
        return ViTImageProcessor.from_pretrained(self.tmpdirname ,**SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : List[Any]):
        # Tear-down: remove the temporary directory created in set-up.
        shutil.rmtree(self.tmpdirname)

    def lowerCAmelCase ( self : Any):
        # Build one random PIL image; moveaxis converts CHW -> HWC.
        # NOTE(review): `np.uinta` looks like a garbled `np.uint8` — confirm.
        __lowerCamelCase : List[str] = np.random.randint(2_5_5 ,size=(3, 3_0, 4_0_0) ,dtype=np.uinta)
        __lowerCamelCase : int = Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE__ ,0 ,-1))
        return image_input

    def lowerCAmelCase ( self : Union[str, Any]):
        # save_pretrained / from_pretrained round-trip with default kwargs.
        __lowerCamelCase : Tuple = self.get_tokenizer()
        __lowerCamelCase : List[Any] = self.get_image_processor()
        __lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        processor.save_pretrained(self.tmpdirname)
        __lowerCamelCase : List[str] = MgpstrProcessor.from_pretrained(self.tmpdirname ,use_fast=SCREAMING_SNAKE_CASE__)
        self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer.get_vocab())
        self.assertIsInstance(processor.char_tokenizer ,SCREAMING_SNAKE_CASE__)
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string())
        self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : int):
        # Round-trip where from_pretrained overrides tokens / normalization.
        __lowerCamelCase : Any = self.get_tokenizer()
        __lowerCamelCase : Optional[int] = self.get_image_processor()
        __lowerCamelCase : Optional[Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        processor.save_pretrained(self.tmpdirname)
        __lowerCamelCase : Optional[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)')
        __lowerCamelCase : Any = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        __lowerCamelCase : str = MgpstrProcessor.from_pretrained(
            self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=SCREAMING_SNAKE_CASE__ ,padding_value=1.0)
        self.assertEqual(processor.char_tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.char_tokenizer ,SCREAMING_SNAKE_CASE__)
        self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        # Processor(images=...) must match the bare image processor's output.
        __lowerCamelCase : List[Any] = self.get_image_processor()
        __lowerCamelCase : Optional[Any] = self.get_tokenizer()
        __lowerCamelCase : str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = self.prepare_image_inputs()
        __lowerCamelCase : Optional[Any] = image_processor(SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        __lowerCamelCase : Optional[int] = processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='np')
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2)

    def lowerCAmelCase ( self : str):
        # Processor(text=...) must match the bare tokenizer's output.
        __lowerCamelCase : Union[str, Any] = self.get_image_processor()
        __lowerCamelCase : List[str] = self.get_tokenizer()
        __lowerCamelCase : Optional[int] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = 'test'
        __lowerCamelCase : Tuple = processor(text=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = tokenizer(SCREAMING_SNAKE_CASE__)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] ,encoded_processor[key])

    def lowerCAmelCase ( self : Union[str, Any]):
        # Text + image call returns pixel_values and labels; no input raises.
        __lowerCamelCase : Optional[Any] = self.get_image_processor()
        __lowerCamelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCamelCase : Union[str, Any] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = 'test'
        __lowerCamelCase : List[str] = self.prepare_image_inputs()
        __lowerCamelCase : int = processor(text=SCREAMING_SNAKE_CASE__ ,images=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(list(inputs.keys()) ,['pixel_values', 'labels'])
        # test if it raises when no input is passed
        with pytest.raises(SCREAMING_SNAKE_CASE__):
            processor()

    def lowerCAmelCase ( self : Any):
        # char_decode must equal tokenizer.batch_decode with spaces removed.
        __lowerCamelCase : Optional[Any] = self.get_image_processor()
        __lowerCamelCase : Union[str, Any] = self.get_tokenizer()
        __lowerCamelCase : List[str] = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
        __lowerCamelCase : Any = processor.char_decode(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = [seq.replace(' ' ,'') for seq in decoded_tok]
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : List[str]):
        # With text=None the processor output keys equal model_input_names.
        __lowerCamelCase : Tuple = self.get_image_processor()
        __lowerCamelCase : Optional[Any] = self.get_tokenizer()
        __lowerCamelCase : str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = None
        __lowerCamelCase : Any = self.prepare_image_inputs()
        __lowerCamelCase : Union[str, Any] = processor(text=SCREAMING_SNAKE_CASE__ ,images=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(list(inputs.keys()) ,processor.model_input_names)

    def lowerCAmelCase ( self : Tuple):
        # batch_decode over char/bpe/wordpiece logits yields all result keys.
        __lowerCamelCase : List[Any] = self.get_image_processor()
        __lowerCamelCase : int = self.get_tokenizer()
        __lowerCamelCase : str = MgpstrProcessor(tokenizer=SCREAMING_SNAKE_CASE__ ,image_processor=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = torch.randn(1 ,2_7 ,3_8)
        __lowerCamelCase : Optional[Any] = torch.randn(1 ,2_7 ,5_0_2_5_7)
        __lowerCamelCase : Optional[Any] = torch.randn(1 ,2_7 ,3_0_5_2_2)
        __lowerCamelCase : Tuple = processor.batch_decode([char_input, bpe_input, wp_input])
        self.assertListEqual(list(results.keys()) ,['generated_text', 'scores', 'char_preds', 'bpe_preds', 'wp_preds'])
| 652 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Configuration holder for DETA image-processing tests.

    Stores the processor kwargs and computes the height/width the processor
    is expected to produce after shortest-edge resizing.

    NOTE(review): obfuscation renamed every parameter of __init__ to
    SCREAMING_SNAKE_CASE__ (a duplicate-argument SyntaxError in real Python)
    and every local to __lowerCamelCase; the attribute assignments below
    reflect the original parameter names (batch_size, num_channels, ...).
    """

    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        __lowerCamelCase : str = parent
        __lowerCamelCase : Union[str, Any] = batch_size
        __lowerCamelCase : int = num_channels
        __lowerCamelCase : Dict = min_resolution
        __lowerCamelCase : Tuple = max_resolution
        __lowerCamelCase : Dict = do_resize
        __lowerCamelCase : List[Any] = size
        __lowerCamelCase : Tuple = do_normalize
        __lowerCamelCase : Any = image_mean
        __lowerCamelCase : List[str] = image_std
        __lowerCamelCase : List[Any] = do_rescale
        __lowerCamelCase : str = rescale_factor
        __lowerCamelCase : Tuple = do_pad

    def lowerCAmelCase ( self : Dict):
        # Kwargs dict used to instantiate the image processor under test.
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False):
        # Compute the (height, width) expected after shortest-edge resizing.
        # Batched mode recurses per-image and pads to the per-axis maxima.
        if not batched:
            __lowerCamelCase : Optional[Any] = image_inputs[0]
            if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image):
                __lowerCamelCase , __lowerCamelCase : Any = image.size
            else:
                # numpy/torch images are channel-first: shape is (C, H, W).
                __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2]
            if w < h:
                __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w)
                __lowerCamelCase : Tuple = self.size['shortest_edge']
            elif w > h:
                __lowerCamelCase : Union[str, Any] = self.size['shortest_edge']
                __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h)
            else:
                # Square input: both sides become the shortest edge.
                __lowerCamelCase : List[Any] = self.size['shortest_edge']
                __lowerCamelCase : Optional[int] = self.size['shortest_edge']
        else:
            __lowerCamelCase : List[str] = []
            for image in image_inputs:
                __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # Padded batch size: max height / max width across the batch.
            __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0]
            __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1]
        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Test suite for DetaImageProcessor: config attributes, batched and
    unbatched PIL/numpy/torch inputs, and slow COCO integration tests
    (detection and panoptic annotations).

    NOTE(review): obfuscation renamed methods/locals; several statements
    reference names (image_processing, encoded_images, ...) that the
    obfuscated assignments no longer bind — compare with the original file.
    """

    # Image-processor class under test (None when vision deps are missing).
    _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self : Optional[Any]):
        # Set-up: create the helper/tester that holds the processor config.
        __lowerCamelCase : List[str] = DetaImageProcessingTester(self)

    @property
    def lowerCAmelCase ( self : Any):
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : Dict):
        # The processor must expose all standard config attributes.
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))

    def lowerCAmelCase ( self : str):
        # from_dict must honor the default size and do_pad values.
        __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
        self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        # Intentionally skipped placeholder (kept for interface parity).
        pass

    def lowerCAmelCase ( self : List[str]):
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
        # Test not batched input
        __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : str):
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
        # Test not batched input
        __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : int):
        # Initialize image_processing
        __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
        # Test not batched input
        __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    @slow
    def lowerCAmelCase ( self : Optional[Any]):
        # Integration test with COCO detection annotations: checks pixel
        # values, areas, boxes, ids, crowd flags, labels and sizes.
        # prepare image and target
        __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
            __lowerCamelCase : List[str] = json.loads(f.read())
        __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        __lowerCamelCase : Optional[int] = DetaImageProcessor()
        __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : int = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify orig_size
        __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))

    @slow
    def lowerCAmelCase ( self : str):
        # Integration test with COCO panoptic annotations; additionally
        # verifies the segmentation masks' pixel sum.
        # prepare image, target and masks_path
        __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
            __lowerCamelCase : Tuple = json.loads(f.read())
        __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
        __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : Tuple = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : int = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify masks
        __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
        # verify orig_size
        __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 1 |
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
class A_ ( SCREAMING_SNAKE_CASE ):
    """Byte-level tokenizer (ByT5-style): each UTF-8 byte is one token, with
    3 special tokens (pad/eos/unk) at ids 0-2 and `extra_ids` sentinel
    tokens appended at the top of the vocabulary.

    NOTE(review): obfuscation renamed parameters/locals; several statements
    reference the original names (extra_ids, additional_special_tokens,
    pad_token, ...) that the renamed assignments no longer bind — compare
    with the original `ByT5Tokenizer` implementation.
    """

    # Names of the tensors this tokenizer produces.
    _UpperCAmelCase : List[Any] = ['''input_ids''', '''attention_mask''']

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : Optional[int]="</s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : int="<pad>" ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_2_5 ,SCREAMING_SNAKE_CASE__ : str=None ,**SCREAMING_SNAKE_CASE__ : Any ,):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            __lowerCamelCase : Dict = [F"<extra_id_{i}>" for i in range(SCREAMING_SNAKE_CASE__)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            __lowerCamelCase : Union[str, Any] = len(set(filter(lambda SCREAMING_SNAKE_CASE__: bool('extra_id' in str(SCREAMING_SNAKE_CASE__)) ,SCREAMING_SNAKE_CASE__)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    F"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    ' provided to ByT5Tokenizer. In this case the additional_special_tokens must include the'
                    ' extra_ids tokens')
        # Normalize the pad/eos/unk tokens into AddedToken instances.
        __lowerCamelCase : Tuple = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else pad_token
        __lowerCamelCase : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else eos_token
        __lowerCamelCase : Optional[int] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else unk_token
        super().__init__(
            eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,extra_ids=SCREAMING_SNAKE_CASE__ ,additional_special_tokens=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : str = extra_ids
        __lowerCamelCase : Dict = 2**8  # utf is 8 bits
        # define special tokens dict
        # NOTE(review): annotation says Dict[int, str] but the literal maps
        # token -> id (str -> int); the annotation looks inverted.
        __lowerCamelCase : Dict[int, str] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        __lowerCamelCase : Optional[Any] = len(self.special_tokens_encoder)
        __lowerCamelCase : Dict = len(SCREAMING_SNAKE_CASE__)
        for i, token in enumerate(SCREAMING_SNAKE_CASE__):
            # Place the extra_id tokens at the very top of the vocabulary.
            __lowerCamelCase : Dict = self.vocab_size + i - n
        __lowerCamelCase : Dict[str, int] = {v: k for k, v in self.special_tokens_encoder.items()}

    @property
    def lowerCAmelCase ( self : Any):
        # Total vocab: 256 byte values + special tokens + extra_id sentinels.
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
        # Mask marking special tokens (1) vs sequence tokens (0); an eos
        # position is appended after each sequence.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
        # normal case: some special tokens
        if token_ids_a is None:
            return ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
        return ([0] * len(SCREAMING_SNAKE_CASE__)) + [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int]):
        # Append eos unless it is already there (then only warn).
        if len(SCREAMING_SNAKE_CASE__) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                F"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                ' eos tokens being added.')
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        # Token type ids: all zeros (this model does not use segment ids).
        __lowerCamelCase : Any = [self.eos_token_id]
        if token_ids_a is None:
            return len(token_ids_a + eos) * [0]
        return len(token_ids_a + eos + token_ids_a + eos) * [0]

    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        # Build model input: seq_a + eos (+ seq_b + eos for pairs).
        __lowerCamelCase : Any = self._add_eos_if_not_present(SCREAMING_SNAKE_CASE__)
        if token_ids_a is None:
            return token_ids_a
        else:
            __lowerCamelCase : Optional[Any] = self._add_eos_if_not_present(SCREAMING_SNAKE_CASE__)
            return token_ids_a + token_ids_a

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : str):
        # Tokenize: one token per UTF-8 byte of the input text.
        __lowerCamelCase : List[str] = [chr(SCREAMING_SNAKE_CASE__) for i in text.encode('utf-8')]
        return tokens

    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # token -> id: specials first, then added tokens, then raw byte value
        # offset by the number of special tokens.
        if token in self.special_tokens_encoder:
            __lowerCamelCase : str = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            __lowerCamelCase : List[Any] = self.added_tokens_encoder[token]
        elif len(SCREAMING_SNAKE_CASE__) != 1:
            # Multi-character strings cannot be single bytes -> unknown.
            __lowerCamelCase : Optional[Any] = self.unk_token_id
        else:
            __lowerCamelCase : Union[str, Any] = ord(SCREAMING_SNAKE_CASE__) + self._num_special_tokens
        return token_id

    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Dict):
        # id -> token: inverse of the mapping above.
        if index in self.special_tokens_decoder:
            __lowerCamelCase : Optional[Any] = self.special_tokens_decoder[index]
        else:
            __lowerCamelCase : Union[str, Any] = chr(index - self._num_special_tokens)
        return token

    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : int):
        # Join tokens back into a string: specials are emitted verbatim,
        # ordinary tokens contribute their single byte; undecodable byte
        # sequences are dropped (errors='ignore').
        __lowerCamelCase : Optional[int] = b''
        for token in tokens:
            if token in self.special_tokens_decoder:
                __lowerCamelCase : Optional[Any] = self.special_tokens_decoder[token].encode('utf-8')
            elif token in self.added_tokens_decoder:
                __lowerCamelCase : int = self.special_tokens_decoder[token].encode('utf-8')
            elif token in self.special_tokens_encoder:
                __lowerCamelCase : Optional[int] = token.encode('utf-8')
            elif token in self.added_tokens_encoder:
                __lowerCamelCase : Any = token.encode('utf-8')
            else:
                __lowerCamelCase : str = bytes([ord(SCREAMING_SNAKE_CASE__)])
            bstring += tok_string
        __lowerCamelCase : Optional[int] = bstring.decode('utf-8' ,errors='ignore')
        return string

    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
        # Byte tokenizer has no vocabulary file to save.
        return ()
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast (CPU, dummy-model) tests for KandinskyVaaControlnetPipeline:
    builds a tiny UNet + VQ movq + DDIM scheduler and checks the pipeline
    output against a hard-coded slice.
    """

    # Pipeline class under test and its required/optional call arguments.
    _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
    _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase : Tuple = False

    # Tiny model dimensions used by the dummy components below.
    @property
    def lowerCAmelCase ( self : Tuple):
        return 3_2

    @property
    def lowerCAmelCase ( self : List[Any]):
        return 3_2

    @property
    def lowerCAmelCase ( self : str):
        return self.time_input_dim

    @property
    def lowerCAmelCase ( self : List[str]):
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase ( self : List[str]):
        return 1_0_0

    @property
    def lowerCAmelCase ( self : Dict):
        # Deterministic tiny UNet configured for image+hint conditioning.
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
        return model

    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        # Config kwargs for the tiny VQ image decoder ("movq").
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def lowerCAmelCase ( self : Optional[Any]):
        # Deterministic tiny VQ model built from the kwargs above.
        torch.manual_seed(0)
        __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
        return model

    def lowerCAmelCase ( self : Optional[Any]):
        # Assemble pipeline components: dummy UNet, DDIM scheduler, movq.
        __lowerCamelCase : Tuple = self.dummy_unet
        __lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
        # Build seeded pipeline-call kwargs (embeds, hint image, generator).
        __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            SCREAMING_SNAKE_CASE__)
        # create hint
        __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            # MPS does not support device-specific generators.
            __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def lowerCAmelCase ( self : Optional[Any]):
        # End-to-end CPU run; output slice must match hard-coded reference,
        # and return_dict=False must give the same image.
        __lowerCamelCase : Dict = 'cpu'
        __lowerCamelCase : Tuple = self.get_dummy_components()
        __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = output.images
        __lowerCamelCase : Tuple = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __lowerCamelCase : List[str] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet pipeline.

    Bug fixes vs. the obfuscated original: both methods were named
    ``lowerCAmelCase`` (the second silently shadowed the first, so VRAM was
    never cleaned up); results were assigned to ``__lowerCamelCase`` while the
    code read real names; ``torch.floataa`` does not exist (fp16 checkpoint →
    ``torch.float16``).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        """End-to-end run against the hub checkpoints; compares the generated
        image with a pre-computed reference array."""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # PIL image -> float tensor in [0, 1], NCHW with a batch dimension.
        hint = torch.from_numpy(np.array(hint)).float() / 255.0
        hint = hint.permute(2, 0, 1).unsqueeze(0)

        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        # NOTE(review): target device restored as the diffusers-wide `torch_device`
        # test constant — confirm it is imported at the top of this file.
        pipe_prior.to(torch_device)

        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        prompt = 'A robot, 4k photo'

        generator = torch.Generator(device='cuda').manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='',
        ).to_tuple()

        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            hint=hint,
            generator=generator,
            num_inference_steps=100,
            output_type='np',
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        assert_mean_pixel_difference(image, expected_image)
| 652 | 1 |
a ="""
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
a =[{"""type""": """code""", """content""": INSTALL_CONTENT}]
a ={
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 652 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    """Builds tiny XGLM configs and inputs for the common TF model tests.

    Bug fixes vs. the obfuscated original: ``__init__`` repeated the parameter
    name ``SCREAMING_SNAKE_CASE__`` (a SyntaxError); attributes were assigned
    to ``__lowerCamelCase`` while the methods read the real names; all four
    methods shared the name ``lowerCAmelCase`` so only the last survived. The
    class is renamed to ``TFXGLMModelTester`` because the sibling test class
    instantiates it under that name.
    """

    config_cls = XGLMConfig   # config class built by get_config()
    config_updates = {}       # extra kwargs merged in by the common tests
    hidden_act = 'gelu'

    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function='gelu',
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model  # get_config() feeds this back as ``d_model``
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1

    def get_large_model_config(self):
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def prepare_config_and_inputs(self):
        # Clip ids to [0, 3] so the tiny vocab's low-id edge cases are hit.
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size),
            clip_value_min=0,
            clip_value_max=3,
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()

        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            # NOTE(review): these two flags were the undefined
            # ``SCREAMING_SNAKE_CASE__``; upstream passes True — confirm.
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, head_mask = self.prepare_config_and_inputs()
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class A_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-test harness for the TF XGLM models.

    Bug fixes vs. the obfuscated original: the base classes were the undefined
    bare name ``SCREAMING_SNAKE_CASE`` (restored to the mixins imported at the
    top of the file); every class attribute was bound to ``_UpperCAmelCase``
    so each shadowed the previous; ``setUp`` had been renamed away so unittest
    never created the testers.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    # NOTE(review): the obfuscated source had three anonymous ``False`` flags;
    # restored as the upstream trio below — confirm against the mixin.
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.')
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class A_ ( unittest.TestCase ):
    """Slow integration tests for TF XGLM text generation.

    Bug fixes vs. the obfuscated original: results were assigned to
    ``__lowerCamelCase`` while the code read the real names; all three methods
    shared the name ``lowerCAmelCase`` (only the last survived and unittest
    discovered none of them); ``tf.intaa`` does not exist (restored to
    ``tf.int32``).
    """

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')

        tf.random.set_seed(0)
        tokenized = tokenizer('Today is a nice day and', return_tensors='tf')
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0'):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M')
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M')

        tokenizer.padding_side = 'left'

        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]

        inputs = tokenizer(sentences, return_tensors='tf', padding=True)
        input_ids = inputs['input_ids']

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs['attention_mask'], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors='tf').input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors='tf').input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 652 | 1 |
from math import factorial
def SCREAMING_SNAKE_CASE__ ( successes , trials , prob ) -> float:
    """Return the binomial probability mass P(X = successes) for ``trials``
    independent Bernoulli trials with success probability ``prob``.

    Bug fix: the obfuscated original declared three parameters all named
    ``lowerCamelCase__`` (a SyntaxError — duplicate parameter names) while the
    body read ``successes``/``trials``/``prob``; the real names are restored.

    Raises:
        ValueError: if either count is negative or non-integer, if
            ``successes > trials``, or if ``prob`` is outside (0, 1).
    """
    # Validate types first so the comparisons/factorials below only ever see ints.
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers' )
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers' )
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials' )
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Binomial coefficient n! / (k! * (n - k)!), computed in exact integer arithmetic.
    coefficient = factorial(trials) // (factorial(successes) * factorial(trials - successes))
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75))
| 652 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
# Optional-dependency guard: if torch or transformers is missing, expose dummy
# placeholder objects so that importing the package still succeeds and fails
# lazily (with a helpful message) only when a pipeline is actually used.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    # Real implementations — each pipeline variant lives in its own module.
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 | 1 |
from collections.abc import Sequence
def SCREAMING_SNAKE_CASE__ ( arr: Sequence[float] , allow_empty_subarrays: bool = False ) -> float:
    """Return the maximum subarray sum of ``arr`` using Kadane's algorithm.

    When ``allow_empty_subarrays`` is True the empty subarray (sum 0) is a
    valid answer, so an all-negative input yields 0 instead of its largest
    element. An empty ``arr`` always returns 0.

    Bug fix: the obfuscated original declared both parameters as
    ``lowerCamelCase__`` (a SyntaxError — duplicate parameter names) while the
    body read ``arr``/``allow_empty_subarrays``; the real names are restored.
    """
    if not arr:
        return 0
    # Best sum seen so far; -inf forces at least one element when empty
    # subarrays are disallowed.
    max_sum = 0 if allow_empty_subarrays else float('-inf')
    curr_sum = 0.0
    for num in arr:
        # Either extend the running subarray or restart (at 0 / at num).
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
a =[-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F"""{max_subarray_sum(nums) = }""")
| 652 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# Bug fix: all of these module constants were bound to the single name ``a``,
# while the docstring decorators further down reference ``_CONFIG_FOR_DOC``,
# ``_CHECKPOINT_FOR_DOC``, ``_EXPECTED_OUTPUT_SHAPE``, ``_IMAGE_CLASS_CHECKPOINT``
# and ``_IMAGE_CLASS_EXPECTED_OUTPUT`` — all previously undefined. The
# upstream names are restored.
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """RegNet basic block: zero-pad -> grouped conv -> batch-norm -> activation.

    Bug fixes vs. the obfuscated original: duplicate ``SCREAMING_SNAKE_CASE__``
    parameter names (SyntaxError); attributes bound to ``__lowerCamelCase``;
    nonexistent ``ZeroPaddingaD``/``ConvaD`` restored to the real Keras layers;
    the forward hook must be named ``call`` for Keras to invoke it. The class
    is renamed because sibling blocks instantiate ``TFRegNetConvLayer``.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count and applies the first conv.

    Bug fixes vs. the obfuscated original: attributes/locals bound to
    ``__lowerCamelCase`` while read under their real names; the forward hook
    restored to ``call``; class renamed because the main layer instantiates
    ``TFRegNetEmbeddings``.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch-norm used to project the residual branch when the
    channel count or spatial resolution changes.

    Bug fixes vs. the obfuscated original: duplicate parameter names, locals
    bound to ``__lowerCamelCase``, nonexistent ``ConvaD``, and the forward
    hook restored to ``call``; renamed because the X/Y layers instantiate
    ``TFRegNetShortCut``.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution")
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation: global-pool, two 1x1 convs (relu then sigmoid),
    then rescale the input feature map channel-wise.

    Bug fixes vs. the obfuscated original: duplicate parameter names,
    ``GlobalAveragePoolingaD``/``ConvaD`` restored to the real Keras layers,
    locals renamed to what the code reads, and the forward hook restored to
    ``call``; renamed because the Y layer instantiates ``TFRegNetSELayer``.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual block: 1x1 -> grouped 3x3 -> 1x1, plus shortcut.

    Bug fixes vs. the obfuscated original: duplicate parameter names
    (SyntaxError), locals bound to ``__lowerCamelCase`` while read under real
    names, the last conv's ``activation`` restored to ``None`` (the block's
    own activation runs after the residual add), and the forward hook
    restored to ``call``; renamed because ``TFRegNetStage`` references it.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet Y residual block: the X block with a squeeze-and-excitation
    layer inserted before the final 1x1 conv.

    Same restoration as ``TFRegNetXLayer``: duplicate parameter names fixed,
    real local/attribute names restored, final conv activation ``None``, and
    the forward hook renamed back to ``call``.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A stage of ``depth`` stacked X or Y blocks; the first block downsamples.

    Bug fixes vs. the obfuscated original: duplicate parameter names fixed,
    locals restored, and the forward hook renamed back to ``call``; renamed
    because ``TFRegNetEncoder`` references ``TFRegNetStage``.
    """

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        # Pick the block type from the config ("x" -> plain, otherwise SE variant).
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Stacks the RegNet stages and optionally collects all hidden states.

    Bug fixes vs. the obfuscated original: locals bound to ``__lowerCamelCase``
    while read under real names, and the forward hook renamed back to
    ``call``; renamed because the main layer references ``TFRegNetEncoder``.
    """

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            ))
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Hidden states are collected *before* each stage, plus once at the end.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; converts outputs back to NCHW.

    Bug fixes vs. the obfuscated original: the class attribute read by
    ``keras_serializable`` was renamed to ``_UpperCAmelCase`` (restored to
    ``config_class``), locals/attributes bound to ``__lowerCamelCase`` while
    read under real names, ``GlobalAveragePoolingaD`` restored, and the
    forward hook renamed back to ``call``; renamed because the model heads
    instantiate ``TFRegNetMainLayer``.
    """

    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class TFRegNetPreTrainedModel(TFPreTrainedModel):
    """Weight-init / pretrained-loading base class for TF RegNet models.

    Bug fixes vs. the obfuscated original: the base class was the undefined
    bare name ``SCREAMING_SNAKE_CASE`` (restored to the imported
    ``TFPreTrainedModel``); class attributes were all named ``_UpperCAmelCase``
    so each shadowed the previous; ``tf.floataa`` does not exist (-> float32).
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"

    @property
    def input_signature(self):
        # NOTE(review): property name restored as the upstream ``input_signature``
        # (it returns a TensorSpec mapping) — confirm against the installed
        # transformers version.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
a =r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''',
    REGNET_START_DOCSTRING,
)
class TFRegNetModel(TFRegNetPreTrainedModel):
    """Headless RegNet backbone.

    Bug fixes vs. the obfuscated original: the base class and decorator
    arguments were undefined ``SCREAMING_SNAKE_CASE`` names (restored to
    ``TFRegNetPreTrainedModel`` / the docstring constants / the imported
    output class); locals restored; forward hook renamed back to ``call``.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values=pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        if not return_dict:
            return (outputs[0],) + outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states,
        )
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''',
    REGNET_START_DOCSTRING,
)
class TFRegNetForImageClassification(TFRegNetPreTrainedModel, TFSequenceClassificationLoss):
    """RegNet backbone + flatten/dense classification head.

    Bug fixes vs. the obfuscated original: both base classes and the decorator
    arguments were undefined ``SCREAMING_SNAKE_CASE`` names (restored to the
    imported classes and docstring constants); locals restored; forward hook
    renamed back to ``call``.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head (identity when the config declares no labels)
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def call(
        self,
        pixel_values: tf.Tensor = None,
        labels: tf.Tensor = None,
        output_hidden_states: bool = None,
        return_dict: bool = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)

        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import table: maps submodule name -> public symbols it provides.
# Consumed by _LazyModule below so heavy backends (torch/tf/flax) are only
# imported when a symbol is actually accessed.
# Fix: the original bound throwaway lists to `a` and then passed the undefined
# name `_import_structure` to _LazyModule (NameError at import time).
_import_structure = {
    "configuration_roberta_prelayernorm": [
        "ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "RobertaPreLayerNormConfig",
        "RobertaPreLayerNormOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch models are only exposed when torch is installed.
    _import_structure["modeling_roberta_prelayernorm"] = [
        "ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RobertaPreLayerNormForCausalLM",
        "RobertaPreLayerNormForMaskedLM",
        "RobertaPreLayerNormForMultipleChoice",
        "RobertaPreLayerNormForQuestionAnswering",
        "RobertaPreLayerNormForSequenceClassification",
        "RobertaPreLayerNormForTokenClassification",
        "RobertaPreLayerNormModel",
        "RobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow models are only exposed when tensorflow is installed.
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
        "TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRobertaPreLayerNormForCausalLM",
        "TFRobertaPreLayerNormForMaskedLM",
        "TFRobertaPreLayerNormForMultipleChoice",
        "TFRobertaPreLayerNormForQuestionAnswering",
        "TFRobertaPreLayerNormForSequenceClassification",
        "TFRobertaPreLayerNormForTokenClassification",
        "TFRobertaPreLayerNormMainLayer",
        "TFRobertaPreLayerNormModel",
        "TFRobertaPreLayerNormPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Flax models are only exposed when flax/jax are installed.
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
        "FlaxRobertaPreLayerNormForCausalLM",
        "FlaxRobertaPreLayerNormForMaskedLM",
        "FlaxRobertaPreLayerNormForMultipleChoice",
        "FlaxRobertaPreLayerNormForQuestionAnswering",
        "FlaxRobertaPreLayerNormForSequenceClassification",
        "FlaxRobertaPreLayerNormForTokenClassification",
        "FlaxRobertaPreLayerNormModel",
        "FlaxRobertaPreLayerNormPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_roberta_prelayernorm import (
        ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
        RobertaPreLayerNormConfig,
        RobertaPreLayerNormOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_roberta_prelayernorm import (
            ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            RobertaPreLayerNormForCausalLM,
            RobertaPreLayerNormForMaskedLM,
            RobertaPreLayerNormForMultipleChoice,
            RobertaPreLayerNormForQuestionAnswering,
            RobertaPreLayerNormForSequenceClassification,
            RobertaPreLayerNormForTokenClassification,
            RobertaPreLayerNormModel,
            RobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_roberta_prelayernorm import (
            TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRobertaPreLayerNormForCausalLM,
            TFRobertaPreLayerNormForMaskedLM,
            TFRobertaPreLayerNormForMultipleChoice,
            TFRobertaPreLayerNormForQuestionAnswering,
            TFRobertaPreLayerNormForSequenceClassification,
            TFRobertaPreLayerNormForTokenClassification,
            TFRobertaPreLayerNormMainLayer,
            TFRobertaPreLayerNormModel,
            TFRobertaPreLayerNormPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_roberta_prelayernorm import (
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a =object()
# For specifying empty leaf dict `{}`
a =object()
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> int:
__lowerCamelCase : Tuple = tuple((re.compile(x + '$' ) for x in qs) )
for i in range(len(lowerCamelCase__ ) - len(lowerCamelCase__ ) + 1 ):
__lowerCamelCase : str = [x.match(lowerCamelCase__ ) for x, y in zip(lowerCamelCase__ , ks[i:] )]
if matches and all(lowerCamelCase__ ):
return True
return False
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Tuple:
def replace(lowerCamelCase__ , lowerCamelCase__ ):
for rule, replacement in rules:
if _match(lowerCamelCase__ , lowerCamelCase__ ):
return replacement
return val
return replace
def _get_partition_rules():
    """Partition rules mapping parameter-path regexes to ``PartitionSpec`` axes.

    ``"mp"`` marks the model-parallel mesh axis; ``None`` in a spec leaves that
    dimension replicated; a ``None`` replacement replicates the whole tensor.
    Fix: the undefined placeholder name ``lowerCamelCase__`` stood in for ``None``.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp', None)),
        (("transformer", "wte", "embedding"), P('mp', None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, 'mp')),
        (("attention", "out_proj", "kernel"), P('mp', None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, 'mp')),
        (("mlp", "c_fc", "bias"), P('mp')),
        (("mlp", "c_proj", "kernel"), P('mp', None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def set_partitions(in_dict):
    """Assign a PartitionSpec to every flattened parameter key of ``in_dict``.

    Returns a frozen dict with the original nesting. Raises AssertionError if
    any key is left unmatched by the rules. Fix: restores the helper/sentinel
    names (`_get_partition_rules`, `_replacement_rules`, `_unmatched`) and the
    lost ``(k, v)`` arguments in the replacement comprehension.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)
    # Seed every flattened key with the sentinel so misses are detectable.
    initd = {k: _unmatched for k in flatten_dict(in_dict)}
    result = {k: replace(k, v) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result))
| 652 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger for this config file.
# Fix: both module globals were bound to `a`, so the dict clobbered the logger.
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class A_ ( PretrainedConfig ):
    """Configuration class for a Megatron-BERT model.

    Defaults reproduce the 345M-parameter Megatron-BERT architecture.
    Fix: the original ``__init__`` declared every parameter with the same
    obfuscated name (a SyntaxError) while its body read the real names; the
    base class referenced an undefined ``SCREAMING_SNAKE_CASE`` instead of the
    imported ``PretrainedConfig``, and the ``model_type`` class attribute had
    lost its name.
    """

    model_type = '''megatron-bert'''

    def __init__(
        self,
        vocab_size=2_9_0_5_6,
        hidden_size=1_0_2_4,
        num_hidden_layers=2_4,
        num_attention_heads=1_6,
        intermediate_size=4_0_9_6,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_1_2,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # "absolute" or a relative-position variant.
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 652 |
import math


def prime_sieve(n: int) -> list:
    """Return all primes strictly below ``n`` (requires n >= 3).

    Odd-only sieve of Eratosthenes. Fix: the original lost every
    ``is_prime[...]`` subscript target, so nothing was ever marked composite.
    """
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    # Mark multiples of each odd i; even indices are never read below.
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(limit: int = 9_9_9_9_6_6_6_6_3_3_3_3) -> int:
    """Project Euler 234: sum of semidivisible numbers not exceeding ``limit``.

    For consecutive primes p < q and p^2 < n < q^2, n is semidivisible when
    exactly one of p (largest prime <= sqrt(n)) and q (smallest prime >=
    sqrt(n)) divides n. Fix: restores all lost assignment targets and passes
    ``sqrt(limit)+100`` (not ``limit``) as the sieve bound.
    """
    # Primes up to sqrt(limit) (plus margin) suffice to enumerate the pairs.
    primes_upper_bound = math.floor(math.sqrt(limit)) + 1_0_0
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue

            if current > limit:
                break

            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2

            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum


if __name__ == "__main__":
    print(solution())
| 652 | 1 |
from __future__ import annotations
import queue
class TreeNode:
    """Node of a binary tree: an integer payload plus left/right child links.

    Fix: the original class name was obfuscated (the rest of the file calls
    ``TreeNode``) and the constructor bound throwaway locals instead of
    ``self.data`` / ``self.left`` / ``self.right``.
    """

    def __init__(self, data) -> None:
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    """Interactively build a binary tree level by level from stdin.

    Entering "n" (or nothing) for a child stops input and returns the root.
    Fix: restores the function name used by the main block and the lost
    assignment targets (root, dequeued node, child links).
    """
    print('\n********Press N to stop entering at any point of time********\n' )
    check = input('Enter the value of the root node: ' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check))
    q.put(tree_node)
    while not q.empty():
        node_found = q.get()
        msg = f"Enter the left node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check))
        node_found.left = left_node
        q.put(left_node)
        msg = f"Enter the right node of {node_found.data}: "
        check = input(msg).strip().lower() or 'n'
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check))
        node_found.right = right_node
        q.put(right_node)
    raise
def pre_order(node: TreeNode) -> None:
    """Print node values in pre-order (root, left, right), comma-separated.

    Fix: restores the recursive function's name and the ``isinstance(node,
    TreeNode)`` guard whose arguments were obfuscated away.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    print(node.data, end=',' )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node: TreeNode) -> None:
    """Print node values in in-order (left, root, right), comma-separated.

    Fix: restores the function name and the broken ``isinstance`` guard.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    in_order(node.left )
    print(node.data, end=',' )
    in_order(node.right )
def post_order(node: TreeNode) -> None:
    """Print node values in post-order (left, right, root), comma-separated.

    Fix: restores the function name and the broken ``isinstance`` guard.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data, end=',' )
def level_order(node: TreeNode) -> None:
    """Print node values in breadth-first (level) order, comma-separated.

    Fix: restores the function name, the ``isinstance`` guard, and the lost
    ``node_dequeued = q.get()`` binding.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data, end=',' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node: TreeNode) -> None:
    """Print values level by level, one printed line per tree level.

    Drains the queue for the current level while collecting the next level in
    ``list_``. Fix: restores the function name and the lost bindings for the
    per-level list and the dequeued node.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data, end=',' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node: TreeNode) -> None:
    """Iterative pre-order traversal using an explicit stack.

    Fix: restores the function name and the lost stack/cursor bindings.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data, end=',' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node: TreeNode) -> None:
    """Iterative in-order traversal using an explicit stack.

    Fix: restores the function name and the lost stack/cursor bindings.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # descend to the leftmost node, deferring visits
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data, end=',' )
        n = n.right
def post_order_iter(node: TreeNode) -> None:
    """Iterative post-order traversal using two stacks.

    stack1 produces reverse post-order; popping stack2 prints post-order.
    Fix: restores the function name and the two-stack bindings whose targets
    were collapsed into one obfuscated name.
    """
    if not isinstance(node, TreeNode) or not node:
        return
    stacka, stackb = [], []
    n = node
    stacka.append(n )
    while stacka:  # to find the reversed order of post order, store it in stack2
        n = stacka.pop()
        if n.left:
            stacka.append(n.left )
        if n.right:
            stacka.append(n.right )
        stackb.append(n )
    while stackb:  # pop up from stack2 will be the post order
        print(stackb.pop().data, end=',' )
def prompt(s: str = "", width: int = 5_0, char: str = "*") -> str:
    """Return ``s`` centered in a ``width``-wide banner made of ``char``.

    With empty ``s``, returns a newline followed by a full-width rule.
    Fix: the original declared three parameters with the same obfuscated name
    (a SyntaxError) and the function name the main block calls was lost.
    """
    if not s:
        return "\n" + width * char
    # Split the remaining width (minus the two padding spaces) around s.
    left, extra = divmod(width - len(s) - 2, 2 )
    return f"{left * char} {s} {(left + extra) * char}"
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("""Binary Tree Traversals"""))
a =build_tree()
print(prompt("""Pre Order Traversal"""))
pre_order(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal"""))
in_order(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal"""))
post_order(node)
print(prompt() + """\n""")
print(prompt("""Level Order Traversal"""))
level_order(node)
print(prompt() + """\n""")
print(prompt("""Actual Level Order Traversal"""))
level_order_actual(node)
print("""*""" * 50 + """\n""")
print(prompt("""Pre Order Traversal - Iteration Version"""))
pre_order_iter(node)
print(prompt() + """\n""")
print(prompt("""In Order Traversal - Iteration Version"""))
in_order_iter(node)
print(prompt() + """\n""")
print(prompt("""Post Order Traversal - Iteration Version"""))
post_order_iter(node)
print(prompt())
| 652 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output of the VE-SDE predictor step.

    NOTE(review): both fields carry the same obfuscated name, so the dataclass
    effectively has a single field; upstream names them ``prev_sample`` and
    ``prev_sample_mean`` — restore before shipping.
    """

    _UpperCAmelCase : torch.FloatTensor
    _UpperCAmelCase : torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """Variance-exploding (VE) SDE scheduler with predictor/corrector sampling.

    NOTE(review): identifiers are machine-obfuscated throughout. Many
    statements bind a throwaway local ``__lowerCamelCase`` where the upstream
    code assigns instance attributes (e.g. ``self.timesteps``,
    ``self.discrete_sigmas``), and several ``def`` headers repeat the same
    parameter name (a SyntaxError). Restore from the upstream
    ``ScoreSdeVeScheduler`` before shipping.
    """

    # Solver order exposed to pipelines.
    _UpperCAmelCase : Dict = 1
    @register_to_config
    def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : int = 2_0_0_0 ,SCREAMING_SNAKE_CASE__ : float = 0.15 ,SCREAMING_SNAKE_CASE__ : float = 0.01 ,SCREAMING_SNAKE_CASE__ : float = 1348.0 ,SCREAMING_SNAKE_CASE__ : float = 1E-5 ,SCREAMING_SNAKE_CASE__ : int = 1 ,):
        # Positionally these defaults are (num_train_timesteps, snr, sigma_min,
        # sigma_max, sampling_eps, correct_steps) — TODO confirm upstream.
        # standard deviation of the initial noise distribution
        __lowerCamelCase : int = sigma_max
        # setable values
        __lowerCamelCase : List[str] = None
        self.set_sigmas(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[int] = None):
        # scale_model_input equivalent: the VE SDE needs no input scaling.
        # NOTE(review): returns `sample`, a name the obfuscated signature never binds.
        return sample
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : Union[str, torch.device] = None):
        # set_timesteps: continuous times from 1 down to sampling_eps.
        __lowerCamelCase : Dict = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        __lowerCamelCase : Optional[int] = torch.linspace(1 ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None ,SCREAMING_SNAKE_CASE__ : float = None):
        # set_sigmas: geometric noise schedule between sigma_min and sigma_max.
        __lowerCamelCase : Optional[int] = sigma_min if sigma_min is not None else self.config.sigma_min
        __lowerCamelCase : Optional[int] = sigma_max if sigma_max is not None else self.config.sigma_max
        __lowerCamelCase : Any = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        __lowerCamelCase : Optional[Any] = torch.exp(torch.linspace(math.log(SCREAMING_SNAKE_CASE__) ,math.log(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : str = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : List[str]):
        # get_adjacent_sigma: sigma at index t-1, or zero at the first step.
        return torch.where(
            timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        # step_pred: one reverse-SDE predictor step (eqs. 6/47 of the paper).
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        __lowerCamelCase : List[str] = timestep * torch.ones(
            sample.shape[0] ,device=sample.device) # torch.repeat_interleave(timestep, sample.shape[0])
        __lowerCamelCase : str = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        __lowerCamelCase : Dict = timesteps.to(self.discrete_sigmas.device)
        __lowerCamelCase : Optional[Any] = self.discrete_sigmas[timesteps].to(sample.device)
        __lowerCamelCase : Optional[Any] = self.get_adjacent_sigma(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__).to(sample.device)
        __lowerCamelCase : int = torch.zeros_like(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        __lowerCamelCase : int = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            __lowerCamelCase : List[Any] = diffusion.unsqueeze(-1)
        __lowerCamelCase : Any = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        __lowerCamelCase : int = randn_tensor(
            sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__ ,device=sample.device ,dtype=sample.dtype)
        __lowerCamelCase : Optional[int] = sample - drift # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        __lowerCamelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=SCREAMING_SNAKE_CASE__ ,prev_sample_mean=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : Optional[torch.Generator] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        # step_correct: one Langevin corrector step with SNR-scaled step size.
        if self.timesteps is None:
            raise ValueError(
                '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        __lowerCamelCase : Optional[int] = randn_tensor(sample.shape ,layout=sample.layout ,generator=SCREAMING_SNAKE_CASE__).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        __lowerCamelCase : str = torch.norm(model_output.reshape(model_output.shape[0] ,-1) ,dim=-1).mean()
        __lowerCamelCase : Tuple = torch.norm(noise.reshape(noise.shape[0] ,-1) ,dim=-1).mean()
        __lowerCamelCase : Tuple = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        __lowerCamelCase : Optional[int] = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        __lowerCamelCase : Union[str, Any] = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            __lowerCamelCase : List[str] = step_size.unsqueeze(-1)
        __lowerCamelCase : str = sample + step_size * model_output
        __lowerCamelCase : Any = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,SCREAMING_SNAKE_CASE__ : torch.FloatTensor ,):
        # add_noise: perturb clean samples with sigma(t)-scaled Gaussian noise.
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        __lowerCamelCase : int = timesteps.to(original_samples.device)
        __lowerCamelCase : Any = self.discrete_sigmas.to(original_samples.device)[timesteps]
        __lowerCamelCase : Optional[Any] = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(SCREAMING_SNAKE_CASE__) * sigmas[:, None, None, None]
        )
        __lowerCamelCase : str = noise + original_samples
        return noisy_samples
    def __len__( self : Optional[int]):
        # Number of training timesteps configured for this scheduler.
        return self.config.num_train_timesteps
| 652 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class A_ ( SCREAMING_SNAKE_CASE ):
    """Config tester: asserts MobileNetV1-specific attributes exist on the config.

    NOTE(review): `hasattr` is called on the obfuscated name
    `SCREAMING_SNAKE_CASE__` instead of the freshly built `config` object —
    restore the local binding upstream uses.
    """

    def lowerCAmelCase ( self : Optional[Any]):
        __lowerCamelCase : Any = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'tf_padding'))
        self.parent.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'depth_multiplier'))
class A_ :
    """Model tester: builds small MobileNetV1 configs/inputs and checks model shapes.

    NOTE(review): `__init__` repeats the same obfuscated parameter name (a
    SyntaxError) and every `__lowerCamelCase` binding below lost its
    `self.<attr>` / local target — restore the real names from the upstream
    `MobileNetV1ModelTester` before use.
    """

    def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]=1_3 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=3_2 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.25 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=8 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : int=1_0_2_4 ,SCREAMING_SNAKE_CASE__ : Optional[int]=3_2 ,SCREAMING_SNAKE_CASE__ : Any="relu6" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.02 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : Dict=1_0 ,SCREAMING_SNAKE_CASE__ : int=None ,):
        __lowerCamelCase : Optional[int] = parent
        __lowerCamelCase : Union[str, Any] = batch_size
        __lowerCamelCase : Optional[Any] = num_channels
        __lowerCamelCase : Any = image_size
        __lowerCamelCase : Tuple = depth_multiplier
        __lowerCamelCase : Any = min_depth
        __lowerCamelCase : Union[str, Any] = tf_padding
        # hidden size scales with the depth multiplier
        __lowerCamelCase : Dict = int(last_hidden_size * depth_multiplier)
        __lowerCamelCase : Union[str, Any] = output_stride
        __lowerCamelCase : Optional[Any] = hidden_act
        __lowerCamelCase : str = classifier_dropout_prob
        __lowerCamelCase : Tuple = use_labels
        __lowerCamelCase : str = is_training
        __lowerCamelCase : Optional[Any] = num_labels
        __lowerCamelCase : List[str] = initializer_range
        __lowerCamelCase : List[str] = scope
    def lowerCAmelCase ( self : List[Any]):
        # prepare_config_and_inputs: random pixel values plus optional labels.
        __lowerCamelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        __lowerCamelCase : Any = None
        __lowerCamelCase : int = None
        if self.use_labels:
            __lowerCamelCase : Tuple = ids_tensor([self.batch_size] ,self.num_labels)
            __lowerCamelCase : str = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels)
        __lowerCamelCase : Any = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def lowerCAmelCase ( self : Optional[int]):
        # get_config: small MobileNetV1 config from the tester's attributes.
        return MobileNetVaConfig(
            num_channels=self.num_channels ,image_size=self.image_size ,depth_multiplier=self.depth_multiplier ,min_depth=self.min_depth ,tf_padding=self.tf_padding ,hidden_act=self.hidden_act ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,)
    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        # create_and_check_model: base model output shape check.
        __lowerCamelCase : Optional[int] = MobileNetVaModel(config=SCREAMING_SNAKE_CASE__)
        model.to(SCREAMING_SNAKE_CASE__)
        model.eval()
        __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(
            result.last_hidden_state.shape ,(
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ) ,)
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
        # create_and_check_for_image_classification: logits shape check.
        __lowerCamelCase : List[Any] = self.num_labels
        __lowerCamelCase : Optional[int] = MobileNetVaForImageClassification(SCREAMING_SNAKE_CASE__)
        model.to(SCREAMING_SNAKE_CASE__)
        model.eval()
        __lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels))
    def lowerCAmelCase ( self : Any):
        # prepare_config_and_inputs_for_common: dict form consumed by the mixin.
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Tuple = config_and_inputs
        __lowerCamelCase : Optional[int] = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """MobileNetV1 model test suite (common model/pipeline mixins + unittest).

    NOTE(review): several `__lowerCamelCase` bindings below lost their
    original targets (e.g. `self.model_tester`, `config_and_inputs`) —
    restore from the upstream `MobileNetV1ModelTest` before running.
    """

    _UpperCAmelCase : Any = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    _UpperCAmelCase : Union[str, Any] = (
        {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase : Dict = False
    _UpperCAmelCase : List[str] = False
    _UpperCAmelCase : Optional[Any] = False
    _UpperCAmelCase : Dict = False
    def lowerCAmelCase ( self : List[str]):
        # setUp: build the model tester and a text-free config tester.
        __lowerCamelCase : Union[str, Any] = MobileNetVaModelTester(self)
        __lowerCamelCase : Union[str, Any] = MobileNetVaConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,has_text_modality=SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : List[str]):
        self.config_tester.run_common_tests()
    @unittest.skip(reason='MobileNetV1 does not use inputs_embeds')
    def lowerCAmelCase ( self : Optional[int]):
        pass
    @unittest.skip(reason='MobileNetV1 does not support input and output embeddings')
    def lowerCAmelCase ( self : Any):
        pass
    @unittest.skip(reason='MobileNetV1 does not output attentions')
    def lowerCAmelCase ( self : Union[str, Any]):
        pass
    def lowerCAmelCase ( self : str):
        # Checks the forward signature starts with `pixel_values`.
        __lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : int = model_class(SCREAMING_SNAKE_CASE__)
            __lowerCamelCase : Any = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            __lowerCamelCase : Tuple = [*signature.parameters.keys()]
            __lowerCamelCase : Tuple = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict):
        # Verifies hidden-state count (26 stages for MobileNetV1).
        def check_hidden_states_output(SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any):
            __lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE__)
            model.to(SCREAMING_SNAKE_CASE__)
            model.eval()
            with torch.no_grad():
                __lowerCamelCase : Union[str, Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))
            __lowerCamelCase : Optional[Any] = outputs.hidden_states
            __lowerCamelCase : Union[str, Any] = 2_6
            self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase , __lowerCamelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            __lowerCamelCase : Any = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            __lowerCamelCase : str = True
            check_hidden_states_output(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE__)
    @slow
    def lowerCAmelCase ( self : int):
        # Smoke-tests loading the first pretrained checkpoint.
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowerCamelCase : int = MobileNetVaModel.from_pretrained(SCREAMING_SNAKE_CASE__)
            self.assertIsNotNone(SCREAMING_SNAKE_CASE__)
def prepare_img():
    """Load the standard COCO cats test fixture used by the integration tests.

    Fix: the original bound the opened image to a throwaway local and then
    returned the unbound name ``image``; the function name the tests call was
    also lost.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
    """Integration test: runs a pretrained MobileNetV1 head on a fixture image.

    NOTE(review): the `__lowerCamelCase` bindings lost their targets (model,
    image_processor, image, inputs, outputs, expected values) — restore from
    upstream before running.
    """

    @cached_property
    def lowerCAmelCase ( self : Any):
        # default_image_processor: only available when vision deps are installed.
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224') if is_vision_available() else None
        )
    @slow
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : List[Any] = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224').to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = self.default_image_processor
        __lowerCamelCase : Optional[int] = prepare_img()
        __lowerCamelCase : str = image_processor(images=SCREAMING_SNAKE_CASE__ ,return_tensors='pt').to(SCREAMING_SNAKE_CASE__)
        # forward pass
        with torch.no_grad():
            __lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE__)
        # verify the logits
        __lowerCamelCase : int = torch.Size((1, 1_0_0_1))
        self.assertEqual(outputs.logits.shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = torch.tensor([-4.1739, -1.1233, 3.1205]).to(SCREAMING_SNAKE_CASE__)
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
| 652 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Configure the root logger to print bare messages at INFO level.
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a 1-D array into a single-column 2-D array of shape (n, 1).

    Fix: the obfuscated parameter name left ``input_array`` unbound in the body.
    """
    return input_array.reshape((input_array.size, 1) )
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Return the within-class scatter (covariance) matrix.

    ``features`` is (n_features, n_samples); ``labels`` assigns each sample a
    class id in ``range(classes)``. The mangled source gave all three parameters
    the same name (a SyntaxError) and assigned locals to throwaway names.
    """
    covariance_sum = np.nan  # placeholder; replaced on the first loop iteration
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Return the between-class scatter (covariance) matrix.

    Each class contributes its sample count times the outer product of the
    deviation of its mean from the overall mean. Restores real parameter and
    local names from the mangled source (duplicate params were a SyntaxError).
    """
    general_data_mean = features.mean(1)
    covariance_sum = np.nan  # placeholder; replaced on the first loop iteration
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]  # number of samples in class i
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project ``features`` (n_features, n_samples) onto its top principal components.

    Returns a (dimensions, n_samples) array. Raises AssertionError when the
    dataset is empty. Restores real names from the mangled source (duplicate
    parameter names were a SyntaxError; locals were bound to throwaway names).
    """
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        eigenvalues, eigenvectors = np.linalg.eigh(covariance_matrix)  # eigenvalues unused
        # Take all the columns in the reverse order (-1), and then take only the first `dimensions`
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info('Principal Component Analysis computed')
        return projected_data
    else:
        # force=True resets any handler installed by the module-level basicConfig
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project ``features`` onto ``dimensions`` linear discriminants.

    Solves the generalized eigenproblem of between- vs. within-class scatter.
    Requires ``classes > dimensions``; raises AssertionError otherwise or when
    the dataset is empty. Restores real names from the mangled source; also
    fixes ``features.any`` (a truthy bound method, always True) to the intended
    call ``features.any()``.
    """
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        eigenvalues, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info('Linear Discriminant Analysis computed')
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    """LDA must raise AssertionError when dimensions are not below classes."""
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions >= classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes')
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    """PCA regression check against a precomputed projection."""
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 652 | 1 |
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
import datasets
a ="""\
@inproceedings{wang2019glue,
title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
note={In the Proceedings of ICLR.},
year={2019}
}
"""
a ="""\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.
"""
a ="""
Compute GLUE evaluation metric associated to each GLUE dataset.
Args:
predictions: list of predictions to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
Returns: depending on the GLUE subset, one or several of:
\"accuracy\": Accuracy
\"f1\": F1 score
\"pearson\": Pearson Correlation
\"spearmanr\": Spearman Correlation
\"matthews_correlation\": Matthew Correlation
Examples:
>>> glue_metric = datasets.load_metric('glue', 'sst2') # 'sst2' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'mrpc') # 'mrpc' or 'qqp'
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'accuracy': 1.0, 'f1': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'stsb')
>>> references = [0., 1., 2., 3., 4., 5.]
>>> predictions = [0., 1., 2., 3., 4., 5.]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})
{'pearson': 1.0, 'spearmanr': 1.0}
>>> glue_metric = datasets.load_metric('glue', 'cola')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'matthews_correlation': 1.0}
"""
def simple_accuracy(preds, labels):
    """Return the fraction of predictions equal to the labels as a float.

    Expects array-likes supporting elementwise `==` and `.mean()` (numpy).
    The mangled source gave both parameters the same name (a SyntaxError).
    """
    return float((preds == labels).mean())
def acc_and_fa(preds, labels):
    """Return both accuracy and F1 for the prediction/label pair.

    NOTE(review): `fa_score` is imported at the top of the file and looks like a
    mangled sklearn `f1_score` — confirm the import line against sklearn.metrics.
    """
    acc = simple_accuracy(preds, labels)
    fa = float(fa_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": fa,
    }
def pearson_and_spearman(preds, labels):
    """Return Pearson and Spearman correlations between predictions and labels.

    The mangled source gave both parameters the same name (a SyntaxError) and
    bound the correlations to throwaway locals.
    """
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class A_(datasets.Metric):
    """GLUE metric: dispatches to the scoring function for each GLUE subset.

    NOTE(review): `datasets.Metric` resolves the `_info`/`_compute` hooks by
    name; the mangled method names broke that contract and made the second
    method shadow the first. Restored here.
    """

    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # stsb is a regression task, so its predictions/references are floats
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'stsb' else 'float32'),
                }),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_fa(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                'You should supply a configuration name selected in '
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]')
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
a =logging.getLogger()
@unittest.skip("Temporarily disable the doc tests.")
@require_torch
@require_tf
@slow
class A_(unittest.TestCase):
    """Runs doctests over transformers source and documentation files.

    NOTE(review): the mangled `analyze_directory` signature repeated one
    parameter name five times (a SyntaxError) and every test method shared one
    name; parameter/method names below are reconstructed from the call sites —
    confirm the test method names against the upstream file.
    """

    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        """Doctest every file in `directory` matching `identifier` and not
        matching `n_identifier`, skipping `ignore_files`."""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing', file)
            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                # `'..' / directory` works via Path.__rtruediv__ on the Path operand
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_files(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_files(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_files(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_files(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        # Documentation files are not importable modules, hence only_modules=False
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
| 652 | 1 |
# Sample adjacency-list graphs for Kosaraju's SCC algorithm below.
# NOTE(review): the mangled source bound both to the throwaway name `a`;
# `test_graph_1`/`test_graph_2` are reconstructed fixture names — confirm usage.
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph: dict, vert: int, visited: list) -> list:
    """DFS post-order (finish order) starting from `vert`.

    Mutates `visited` in place. The mangled source gave all three parameters
    the same name (a SyntaxError) and dropped the `visited[vert]` marking.
    """
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph: dict, vert: int, visited: list) -> list:
    """Collect all vertices reachable from `vert` in the reversed graph.

    Mutates `visited` in place; returns one strongly connected component.
    Restores the parameter names and the `visited[vert]` marking lost in the
    mangled source.
    """
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph: dict) -> list:
    """Kosaraju's algorithm: return the list of strongly connected components.

    `graph` maps each vertex in `range(len(graph))` to its adjacency list.
    First pass computes DFS finish order; second pass runs DFS on the reversed
    graph in reverse finish order. Locals restored from the mangled source.
    """
    visited = len(graph) * [False]
    reversed_graph: dict = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
| 652 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a =logging.get_logger(__name__)
# Tokenizer constants; the class below references these names, but the mangled
# source bound every one of them to the throwaway name `a`.
SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class A_(PreTrainedTokenizer):
    """BARTpho (syllable) tokenizer: a SentencePiece model restricted to a
    monolingual fairseq vocabulary.

    NOTE(review): the mangled source used an undefined base-class name, gave
    every class attribute and hook method a throwaway name, and left instance
    attributes unassigned; names below are restored from the usages inside the
    class and the `PreTrainedTokenizer` contract.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        monolingual_vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, "r", encoding="utf-8") as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        # The SentencePiece processor is not picklable; serialize its proto instead.
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """`<s> A </s>` for one sequence, `<s> A </s></s> B </s>` for a pair."""
        if token_ids_b is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_b + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 for special tokens, 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_b, already_has_special_tokens=True)
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_b: Optional[List[int]] = None
    ) -> List[int]:
        """All zeros: BARTpho does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Tokens outside the reduced fairseq vocab map to <unk>."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        # NOTE(review): the mangled source replaced the token string with itself;
        # stripping the SentencePiece underline is the intended behavior.
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the SentencePiece model and monolingual dict into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        out_monolingual_vocab_file = os.path.join(
            save_directory,
            (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            # Fall back to re-serializing the in-memory model
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"{str(token)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 652 | 1 |
import math
def solution(n: int = 1_0_0) -> int:
    """Project Euler 6: difference between the square of the sum and the sum of
    the squares of the first `n` natural numbers.

    The mangled source bound both partial results to throwaway locals and then
    returned undefined names; the main guard below calls `solution()`.
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
# Print the answer for the default n=100 when run as a script.
if __name__ == "__main__":
    print(F"""{solution() = }""")
| 652 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    """Builds small DeiT configs/inputs for the TF model tests below.

    NOTE(review): the mangled constructor assigned every argument to a throwaway
    local instead of `self.*`, and the class name did not match the
    `TFDeiTModelTester(self)` call in the test class's `setUp`; both restored.
    """

    def __init__(
        self,
        parent,
        batch_size=1_3,
        image_size=3_0,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=3_2,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=3_7,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=1_0,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class A_(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common TF model tests for DeiT.

    NOTE(review): the mangled bases were undefined names; the file imports
    `TFModelTesterMixin` and `PipelineTesterMixin`, which match the two mixin
    slots. Class-attribute and method names restored per the mixin contract.
    """

    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDeiTModel,
            'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Drop labels for models whose call() does not accept them
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO test image used by the slow inference test.

    The call site below uses the name ``prepare_img`` and the body returns
    ``image``; both names are restored from the mangled source.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class A_(unittest.TestCase):
    """Slow integration test: distilled DeiT classification head on a COCO image."""

    @cached_property
    def default_image_processor(self):
        # The test below reads `self.default_image_processor`; the mangled method
        # name broke that attribute lookup.
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")
        # forward pass
        outputs = model(**inputs)
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 652 | 1 |
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size caps used by `training_function` below: training batches larger
# than MAX_GPU_BATCH_SIZE fall back to gradient accumulation (except on TPU).
# Bug fix: both literals were bound to the same throwaway name `a`, while the
# code below reads `MAX_GPU_BATCH_SIZE`, which was never defined.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
# Preserve `a` (final value 32, matching the original last assignment) in case
# anything else in the file still references it.
a = EVAL_BATCH_SIZE
def get_fold_dataloaders(accelerator, dataset, train_idxs, valid_idxs, batch_size=1_6):
    """Build train/validation/test `DataLoader`s for one cross-validation fold.

    Args:
        accelerator: the `Accelerator` coordinating (distributed) processing.
        dataset: the raw GLUE/MRPC `DatasetDict` (its 'train' split is re-split
            into fold train/validation; its 'validation' split becomes 'test').
        train_idxs / valid_idxs: row indices for this fold.
        batch_size: per-device batch size for all three loaders.

    Bug fixes: the original declared five parameters with one duplicated
    mangled name (a SyntaxError), read names that were never bound
    (`accelerator`, `dataset`, `datasets`, `tokenizer`, ...), and passed
    unbound identifiers where `True`/`False`/`None` flags belong. The name is
    restored to match the only call site (`get_fold_dataloaders`, see
    `training_function`); the old mangled name was shadowed by later defs.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = DatasetDict(
        {
            'train': dataset['train'].select(train_idxs),
            'validation': dataset['train'].select(valid_idxs),
            'test': dataset['validation'],
        } )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 1_6
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='longest' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='pt' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets['test'] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    """Train and evaluate BERT on GLUE/MRPC with stratified K-fold cross validation.

    Args:
        config: dict of hyper-parameters ('lr', 'num_epochs', 'seed', 'batch_size').
        args: parsed CLI args ('cpu', 'mixed_precision', 'num_folds').

    For each fold a fresh model is trained and its test-set logits are kept;
    the folds' logits are averaged at the end and scored against the test
    references gathered during the first fold.

    Bug fixes: the original declared two parameters with the same mangled name
    (a SyntaxError) and bound every intermediate to a throwaway name while
    reading unbound identifiers (`args`, `config`, `accelerator`, `kfold`,
    `metric`, ...). The name is restored to match the call site in `main`.
    """
    # New Code #
    test_references = []
    # Download the dataset
    datasets = load_dataset('glue' , 'mrpc' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    metric = evaluate.load('glue' , 'mrpc' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['train'].num_rows ) , datasets['train']['label'] )
    test_predictions = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=True )

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=1_0_0 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

        # Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                # Scale the loss so accumulated gradients average out correctly.
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
                metric.add_batch(
                    predictions=predictions , references=references , )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(F"epoch {epoch}:" , eval_metric )

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print('Average test metrics from all folds:' , test_metric )
def main():
    """Parse CLI arguments, assemble the hyper-parameter config and launch training.

    Bug fixes: the original passed unbound mangled names as `type=`/`default=`
    for argparse and to `training_function`; restored `type=str`/`default=None`
    for --mixed_precision and `type=int` for --num_folds. The name is restored
    to match the `main()` call in the `__main__` guard below.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.' )
    parser.add_argument(
        '--mixed_precision' , type=str , default=None , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.' , )
    parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
    # New Code #
    parser.add_argument('--num_folds' , type=int , default=3 , help='The number of splits to perform across the dataset' )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': 3, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config , args )
# Standard script entry point: run training only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()
| 652 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output container for a Stable-Diffusion-style pipeline: the generated
    images plus an optional per-image content flag.

    NOTE(review): both annotations below bind the SAME name `_UpperCAmelCase`,
    so the second overwrites the first in `__annotations__` and this dataclass
    ends up with a single `Optional[List[bool]]` field. They were presumably
    meant to be two distinct fields (images and an NSFW flag list) — confirm
    against callers before renaming.
    """
    # Generated images as PIL images or a numpy array (per the annotation).
    _UpperCAmelCase : Union[List[PIL.Image.Image], np.ndarray]
    # Optional per-image boolean flags — duplicate name, see class docstring.
    _UpperCAmelCase : Optional[List[bool]]
# Only import the heavy pipeline implementation when both `transformers` and
# `torch` are importable; otherwise this module exposes only the output class.
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 652 | 1 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
a =get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tokenizer test-suite for DeBERTa-v2 (slow and fast tokenizers), driven by
    a local SentencePiece fixture model (SAMPLE_VOCAB, defined above).

    NOTE(review): this file looks machine-mangled — the four class attributes
    below all share the name `_UpperCAmelCase` (each rebinding clobbers the
    previous), many locals are bound to `__lowerCamelCase` and then read via
    unbound names (`tokenizer`, `vocab_keys`, ...), and assertions compare
    `SCREAMING_SNAKE_CASE__` with itself. Confirm against the upstream
    transformers test file before trusting any individual assertion here.
    """
    # Mangled class-level config: tokenizer classes + two boolean flags.
    _UpperCAmelCase : str = DebertaVaTokenizer
    _UpperCAmelCase : Tuple = DebertaVaTokenizerFast
    _UpperCAmelCase : List[str] = True
    _UpperCAmelCase : List[Any] = True

    # setUp: build a tokenizer from the SentencePiece fixture and save it.
    def lowerCAmelCase ( self : Optional[int]):
        super().setUp()
        # We have a SentencePiece fixture for testing
        __lowerCamelCase : Optional[int] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,unk_token='<unk>')
        tokenizer.save_pretrained(self.tmpdirname)

    # get_input_output_texts equivalent: identical input/output sample text.
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[int]):
        __lowerCamelCase : int = 'this is a test'
        __lowerCamelCase : int = 'this is a test'
        return input_text, output_text

    # token<->id round-trip for the pad token.
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : List[str] = '<pad>'
        __lowerCamelCase : Dict = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(SCREAMING_SNAKE_CASE__) ,SCREAMING_SNAKE_CASE__)

    # Spot-check first/last vocab entries and total vocab size.
    def lowerCAmelCase ( self : Union[str, Any]):
        __lowerCamelCase : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0] ,'<pad>')
        self.assertEqual(vocab_keys[1] ,'<unk>')
        self.assertEqual(vocab_keys[-1] ,'[PAD]')
        self.assertEqual(len(SCREAMING_SNAKE_CASE__) ,3_0_0_0_1)

    def lowerCAmelCase ( self : Any):
        self.assertEqual(self.get_tokenizer().vocab_size ,3_0_0_0_0)

    # do_lower_case behaviour, slow vs. fast tokenizer.
    def lowerCAmelCase ( self : Optional[Any]):
        # fmt: off
        __lowerCamelCase : Optional[Any] = ' \tHeLLo!how  \n Are yoU?  '
        __lowerCamelCase : List[str] = ['▁hello', '!', 'how', '▁are', '▁you', '?']
        # fmt: on
        __lowerCamelCase : Union[str, Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def lowerCAmelCase ( self : Tuple):
        pass

    @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
    def lowerCAmelCase ( self : Optional[int]):
        pass

    # split_by_punct=True, default casing.
    def lowerCAmelCase ( self : Union[str, Any]):
        # fmt: off
        __lowerCamelCase : List[Any] = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : List[str] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __lowerCamelCase : int = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # do_lower_case=True with split_by_punct variant.
    def lowerCAmelCase ( self : Dict):
        # fmt: off
        __lowerCamelCase : int = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : Union[str, Any] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __lowerCamelCase : Tuple = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # do_lower_case=True, split_by_punct=False variant.
    def lowerCAmelCase ( self : Tuple):
        # fmt: off
        __lowerCamelCase : List[Any] = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : List[str] = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        __lowerCamelCase : Any = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # do_lower_case=False, split_by_punct=True variant.
    def lowerCAmelCase ( self : Dict):
        # fmt: off
        __lowerCamelCase : Tuple = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : List[Any] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
        # fmt: on
        __lowerCamelCase : Any = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # Whitespace/unknown-char handling with split_by_punct.
    def lowerCAmelCase ( self : List[str]):
        # fmt: off
        __lowerCamelCase : Tuple = ' \tHeLLo!how  \n Are yoU?  '
        __lowerCamelCase : Union[str, Any] = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?']
        # fmt: on
        __lowerCamelCase : List[str] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,do_lower_case=SCREAMING_SNAKE_CASE__ ,split_by_punct=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # Slow vs. fast tokenizer parity on encode/convert_ids_to_tokens.
    def lowerCAmelCase ( self : str):
        __lowerCamelCase : str = self.get_tokenizer()
        __lowerCamelCase : Dict = self.get_rust_tokenizer()
        __lowerCamelCase : Optional[int] = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__))
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = self.get_rust_tokenizer()
        __lowerCamelCase : List[Any] = tokenizer.encode(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # Full tokenization pipeline (ids, tokens, back-conversion) with keep_accents.
    def lowerCAmelCase ( self : Dict):
        __lowerCamelCase : Optional[Any] = 'This is a test'
        __lowerCamelCase : int = [1_3, 1, 4_3_9_8, 2_5, 2_1, 1_2_8_9]
        __lowerCamelCase : Tuple = ['▁', 'T', 'his', '▁is', '▁a', '▁test']
        __lowerCamelCase : str = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test']
        __lowerCamelCase : Union[str, Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = DebertaVaTokenizerFast(SCREAMING_SNAKE_CASE__ ,keep_accents=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Tuple = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        # fmt: off
        __lowerCamelCase : Optional[int] = 'I was born in 92000, and this is falsé.'
        __lowerCamelCase : List[Any] = [1_3, 1, 2_3, 3_8_6, 1_9, 5_6_1, 3_0_5_0, 1_5, 1_7, 4_8, 2_5, 8_2_5_6, 1_8, 1, 9]
        __lowerCamelCase : int = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ]
        __lowerCamelCase : Optional[int] = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
        # fmt: on
        __lowerCamelCase : Tuple = tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = rust_tokenizer.encode(SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : str = rust_tokenizer.tokenize(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : int = rust_tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__)
        self.assertListEqual(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)

    # build_inputs_with_special_tokens for one and two sequences.
    def lowerCAmelCase ( self : Any):
        __lowerCamelCase : Union[str, Any] = DebertaVaTokenizer(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Dict = tokenizer.encode('sequence builders')
        __lowerCamelCase : Any = tokenizer.encode('multi-sequence build')
        __lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = tokenizer.build_inputs_with_special_tokens(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] ,SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] ,SCREAMING_SNAKE_CASE__ ,)

    # Full integration check against a pinned hub revision of deberta-v2-xlarge.
    @slow
    def lowerCAmelCase ( self : Optional[int]):
        # fmt: off
        __lowerCamelCase : Dict = {'input_ids': [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=SCREAMING_SNAKE_CASE__ ,model_name='microsoft/deberta-v2-xlarge' ,revision='ad6e42c1532ddf3a15c39246b63f5559d558b670' ,)
| 652 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
# Emit a deprecation warning at import time pointing users at the new import
# location; `standard_warn=False` keeps the message verbatim and `stacklevel=3`
# attributes the warning to the importing module rather than this shim.
deprecate(
    """stable diffusion controlnet""",
    """0.22.0""",
    """Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.""",
    standard_warn=False,
    stacklevel=3,
)
| 652 | 1 |
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
a =get_logger(__name__)
a =r"""
Args:
input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
search or log softmax for each vocabulary token when using beam search
kwargs (`Dict[str, Any]`, *optional*):
Additional logits processor specific kwargs.
Return:
`jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
"""
class A_ :
    """Abstract base class for all logit processors applied during generation.

    Bug fixes: the original `__call__` declared two parameters with the same
    mangled name (a SyntaxError) and decorated with an unbound identifier; the
    inputs docstring defined above is bound to the module-level name `a`.
    """

    @add_start_docstrings(a)
    def __call__( self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        # Subclasses must implement the actual score transformation.
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class A_ :
    """Abstract base class for logit warpers applied during multinomial sampling.

    Bug fixes: as with the processor base above, the original `__call__`
    declared duplicate mangled parameter names (a SyntaxError) and decorated
    with an unbound identifier; the docstring constant is bound to `a`.
    """

    @add_start_docstrings(a)
    def __call__( self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
        # Subclasses must implement the actual score transformation.
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class A_ ( SCREAMING_SNAKE_CASE ):
    """A list of logits processors applied sequentially to the scores.

    Processors whose `__call__` takes more than the standard three arguments
    (input_ids, scores, cur_len) receive the extras from `kwargs`; a missing
    required kwarg raises a ValueError naming the offending processor.

    Bug fixes: the original bound intermediates to a throwaway name and then
    read `function_args`, `kwargs` and `scores` unbound, and the varargs
    parameter shadowed the positional mangled names.
    """

    @add_start_docstrings(a)
    def __call__( self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
        for processor in self:
            # Inspect the processor's signature to see whether it expects extra kwargs.
            function_args = inspect.signature(processor.__call__).parameters
            if len(function_args) > 3:
                # Every extra parameter (beyond self/input_ids/scores... slots) must be supplied.
                if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
                    raise ValueError(
                        F"Make sure that all the required parameters: {list(function_args.keys())} for "
                        F"{processor.__class__} are passed to the logits processor.")
                scores = processor(input_ids, scores, cur_len, **kwargs)
            else:
                scores = processor(input_ids, scores, cur_len)
        return scores
class A_ ( SCREAMING_SNAKE_CASE ):
    """Logits warper that rescales scores by 1/temperature for sampling.

    Bug fixes: the original validated `isinstance(x, x)` against the parameter
    itself, read the unbound name `temperature`, and declared duplicate
    mangled parameter names in `__call__` (a SyntaxError).
    """

    def __init__( self, temperature: float):
        # A non-float or non-positive temperature would silently break sampling.
        if not isinstance(temperature, float) or not (temperature > 0):
            raise ValueError(F"`temperature` has to be a strictly positive float, but is {temperature}")
        self.temperature = temperature

    def __call__( self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Dividing by temperature flattens (>1) or sharpens (<1) the distribution.
        scores = scores / self.temperature
        return scores
class A_ ( SCREAMING_SNAKE_CASE ):
    """Logits warper performing nucleus (top-p) filtering: keeps the smallest
    set of tokens whose cumulative probability exceeds `top_p` and assigns
    `filter_value` to the rest, always retaining at least
    `min_tokens_to_keep` tokens.

    Bug fixes: the original validated parameters via `isinstance(x, x)`, read
    unbound names throughout (`top_p`, `min_tokens_to_keep`, `scores`,
    `cumulative_probs`, ...), declared duplicate mangled parameter names in
    `__call__` (a SyntaxError), and passed an unbound name where the boolean
    `True` belongs in the `.set(...)` mask updates.
    """

    def __init__( self, top_p: float, filter_value: float = -float('Inf'), min_tokens_to_keep: int = 1):
        if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"`top_p` has to be a float > 0 and < 1, but is {top_p}")
        if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
            raise ValueError(F"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep

    def __call__( self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # Sort the full vocabulary by score (descending) to build the nucleus mask.
        topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
        mask_scores = jnp.full_like(scores, self.filter_value)
        cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask, 1)
        score_mask |= score_mask.at[:, 0].set(True)
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
        topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
        # Undo the sort so scores line up with the original vocabulary order.
        next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
        return next_scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits warper that keeps only the `top_k` highest-scoring tokens,
    setting every other token to `filter_value`."""

    def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
        if not isinstance(top_k, int) or top_k <= 0:
            raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
        # Never keep fewer than min_tokens_to_keep tokens.
        self.top_k = max(top_k, min_tokens_to_keep)
        self.filter_value = filter_value

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
        topk = min(self.top_k, scores.shape[-1])  # Safety check
        topk_scores, topk_indices = lax.top_k(scores, topk)
        # Offset per-row indices into the flattened (batch*vocab) array.
        shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
        next_scores = next_scores_flat.reshape(batch_size, vocab_size)
        return next_scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that forces `bos_token_id` to be sampled at the
    first generation step (cur_len == 1)."""

    def __init__(self, bos_token_id: int):
        self.bos_token_id = bos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # jnp.bool_(cur_len - 1) is False only when cur_len == 1, so the
        # penalty applies exactly at the first step.
        apply_penalty = 1 - jnp.bool_(cur_len - 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that forces `eos_token_id` to be sampled when the
    sequence reaches `max_length`."""

    def __init__(self, max_length: int, eos_token_id: int):
        self.max_length = max_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        new_scores = jnp.full(scores.shape, -float("inf"))
        # Applies only at the step producing the final token
        # (cur_len == max_length - 1).
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
        scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that forbids `eos_token_id` until the sequence is at
    least `min_length` tokens long."""

    def __init__(self, min_length: int, eos_token_id: int):
        if not isinstance(min_length, int) or min_length < 0:
            raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
        if not isinstance(eos_token_id, int) or eos_token_id < 0:
            raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
        self.min_length = min_length
        self.eos_token_id = eos_token_id

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
        scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that suppresses `begin_suppress_tokens` at the first
    generated position (cur_len == begin_index)."""

    def __init__(self, begin_suppress_tokens, begin_index):
        self.begin_suppress_tokens = list(begin_suppress_tokens)
        self.begin_index = begin_index

    def __call__(self, input_ids, scores, cur_len: int):
        # Zero exactly when cur_len == begin_index, enabling the suppression.
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
        scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that unconditionally suppresses the tokens in
    `suppress_tokens` at every generation step."""

    def __init__(self, suppress_tokens: list):
        self.suppress_tokens = list(suppress_tokens)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Logits processor that forces specific tokens at specific generation
    indices, given a `{index: token_id}` map."""

    def __init__(self, force_token_map):
        force_token_map = dict(force_token_map)
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token)
        self.force_token_array = jnp.int32(force_token_array)

    def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
        def _force_token(generation_idx):
            # Build a score matrix that is -inf everywhere except the forced
            # token's column, which is set to 0.
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
            updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
            new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
            return new_scores

        # Only force when cur_len is inside the map and has a non-negative entry.
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0],
            lambda: scores,
            lambda: lax.cond(
                self.force_token_array[cur_len] >= 0,
                lambda: _force_token(cur_len),
                lambda: scores,
            ),
        )
        return scores
class A_(SCREAMING_SNAKE_CASE):
    """Whisper timestamp logits processor: constrains generation so that
    timestamp tokens appear in valid pairs and forces a timestamp when the
    timestamp probability mass dominates the text tokens."""

    def __init__(self, generate_config, model_config, decoder_input_length):
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        # All ids >= timestamp_begin are timestamp tokens.
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config, "max_initial_timestamp_index"):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size

    def __call__(self, input_ids, scores, cur_len):
        # suppress <|notimestamps|> which is handled by without_timestamps
        scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))

        def handle_pairs(input_ids_k, scores_k):
            # Timestamps come in pairs: after a lone timestamp, only another
            # timestamp may follow; after a completed pair, no timestamp may
            # immediately follow.
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin,
                True and last_was_timestamp,
                False,
            )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin,
                True,
                penultimate_was_timestamp,
            )
            return jnp.where(
                last_was_timestamp,
                jnp.where(
                    penultimate_was_timestamp > 0,
                    scores_k.at[self.timestamp_begin :].set(-float("inf")),
                    scores_k.at[: self.eos_token_id].set(-float("inf")),
                ),
                scores_k,
            )

        scores = jax.vmap(handle_pairs)(input_ids, scores)

        # Cap the first predicted timestamp at max_initial_timestamp_index.
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None,
            True and apply_max_initial_timestamp,
            False,
        )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp,
            scores.at[:, last_allowed + 1 :].set(-float("inf")),
            scores,
        )

        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores, axis=-1)

        def handle_cumulative_probs(logprobs_k, scores_k):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
            return jnp.where(
                timestamp_logprob > max_text_token_logprob,
                scores_k.at[: self.timestamp_begin].set(-float("inf")),
                scores_k,
            )

        scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
        return scores
| 652 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# Name of the vocabulary file shipped with CPM-Ant checkpoints.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Map from checkpoint name to its hosted vocabulary file.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

# Maximum model input sizes (in tokens) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    """Load a newline-delimited vocabulary file.

    Returns an OrderedDict mapping each token (one per line, newline
    stripped) to its 0-based line index.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(SCREAMING_SNAKE_CASE):
    """Greedy longest-match-first WordPiece tokenizer over a fixed vocab.

    NOTE: renamed from `A_` so the `WordpieceTokenizer(...)` call in the
    tokenizer's `__init__` resolves.
    """

    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        # Words longer than this are mapped to a single unk token.
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        """Split `token` into the longest vocab substrings, left to right."""
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # Shrink the window from the right until a vocab entry matches.
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No prefix matched: emit unk and advance one character.
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
class A_(SCREAMING_SNAKE_CASE):
    """CPM-Ant tokenizer: jieba word segmentation followed by WordPiece.

    Space and newline are stored in the vocab as "</_>" and "</n>" and are
    remapped to " " and "\\n" in memory (and back on save).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(
        self,
        vocab_file,
        bod_token="<d>",
        eod_token="</d>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        line_token="</n>",
        space_token="</_>",
        padding_side="left",
        **kwargs,
    ):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token,
            eod_token=eod_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            unk_token=unk_token,
            line_token=line_token,
            space_token=space_token,
            padding_side=padding_side,
            **kwargs,
        )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        # Remap the vocab's literal space/newline placeholders to the real
        # characters so lookups during tokenization work directly.
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)

    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def _tokenize(self, text):
        """Segment with jieba, then WordPiece each segment."""
        output_tokens = []
        for word in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(word))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        # Drop padding sentinels (<0) and special tokens before decoding.
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)

    def check(self, token):
        """Return True if `token` is in the vocabulary."""
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]):
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the vocabulary back to disk, restoring the "</_>"/"</n>"
        placeholders, and return the path as a 1-tuple."""
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_a: List[int], token_ids_a_a: List[int] = None):
        """Prefix each sequence with BOS; concatenates pairs."""
        if token_ids_a_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a_a

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """1 marks special-token positions matching build_inputs_with_special_tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=already_has_special_tokens)
        if token_ids_a_a is not None:
            return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a))
        return [1] + ([0] * len(token_ids_a))
| 652 | 1 |
from math import factorial
def solution(num: int = 100) -> int:
    """Return the sum of the decimal digits of num! (Project Euler 20).

    The original mapped the *input number* over the digit string; each
    digit character must be converted with `int` before summing.
    """
    return sum(map(int, str(factorial(num))))


if __name__ == "__main__":
    print(solution(int(input("""Enter the Number: """).strip())))
| 652 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Declare the lazy import structure; heavy torch-backed modules are only
# registered when torch is installed, and nothing is imported eagerly.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Extend (not replace) the structure with the torch-only symbols.
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys

    # Install the lazy proxy so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)

# Map from checkpoint name to its hosted configuration file.
PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json",
    # See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class A_(SCREAMING_SNAKE_CASE):
    """Configuration for Perceiver models.

    Stores architecture hyperparameters (latent/model dims, attention
    layout) plus task-specific attributes for masked LM, image
    classification, optical flow, and multimodal autoencoding.
    """

    _UpperCAmelCase: List[Any] = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class A_(SCREAMING_SNAKE_CASE):
    """ONNX export configuration for Perceiver models."""

    @property
    def inputs(self):
        """Dynamic-axis layout; Perceiver feeds both text and image through
        the single "inputs" tensor."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor,
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework=None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ):
        """Build dummy inputs for ONNX export from either a tokenizer or an
        image feature extractor; the model tensor is renamed to "inputs"."""
        # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0)
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add)
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.')
| 652 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class A_(SCREAMING_SNAKE_CASE):
    """Scheduler tests for UnCLIPScheduler.

    NOTE: all test methods were obfuscated to the same name and shadowed
    each other, so only one test survived; restored distinct pytest-style
    names grounded in the assertions each body makes.
    """

    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                # prev_timestep must precede time_step.
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5
        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
        pass
| 652 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    """Build the argparse parser for `accelerate env`.

    When `subparsers` is given (CLI registration), attach to it and set
    `env_command` as the handler; otherwise create a standalone parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')

    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.')

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    """Collect and print environment information for bug reports.

    Returns the info dict (also printed in GitHub-issue-friendly form).
    """
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': f"{pt_version} ({pt_cuda_available})",
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()

    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([f"- {prop}: {val}" for prop, val in info.items()]))

    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info['`Accelerate` configs'] = accelerate_config
    return info
def SCREAMING_SNAKE_CASE__ ( ) -> int:
    """CLI entry point: parse arguments and print the environment report.

    NOTE(review): `env_command_parser` / `env_command` are the upstream names
    of the two helpers defined above (renamed here by the obfuscation).
    """
    # The renamed original read the unbound names `parser`/`lowerCamelCase__`;
    # use real locals instead.
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args )
    return 0
if __name__ == "__main__":
    # `main` does not exist under that name in this file; call the entry
    # point by its actual (renamed) identifier.
    raise SystemExit(SCREAMING_SNAKE_CASE__())
| 652 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a =logging.get_logger(__name__)
a ={
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class A_ ( SCREAMING_SNAKE_CASE ):
    """Configuration class for the Swin2SR image super-resolution model.

    Defaults reproduce the `caidas/swin2sr-classicalsr-x2-64` architecture.
    The machine-renamed version repeated a single parameter name throughout
    `__init__`, which is a Python SyntaxError; the upstream parameter names
    are restored here (backward-compatible: same positions and defaults).
    """

    _UpperCAmelCase : Optional[int] = '''swin2sr'''
    # Map the library's generic attribute names onto Swin2SR-specific ones.
    _UpperCAmelCase : Any = {
        '''hidden_size''': '''embed_dim''',
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=6_4,
        patch_size=1,
        num_channels=3,
        embed_dim=1_8_0,
        depths=[6, 6, 6, 6, 6, 6],     # transformer blocks per stage
        num_heads=[6, 6, 6, 6, 6, 6],  # attention heads per stage
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1E-5,
        upscale=2,                     # super-resolution scale factor
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        # Derived: one "layer" per entry in `depths`.
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Public API skeleton for the M2M-100 model: symbols are imported lazily at
# runtime (via _LazyModule) or eagerly for static type checkers.
# NOTE(review): machine renaming collapsed `_import_structure` and its later
# "modeling_m2m_100" entry into the same name `a`, so the list below
# overwrites the dict and the `_import_structure` reference at the bottom is
# unbound — confirm against the upstream `__init__.py`.
a ={
    """configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
    """tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: simply omit the modeling symbols.
    pass
else:
    a =[
        """M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """M2M100ForConditionalGeneration""",
        """M2M100Model""",
        """M2M100PreTrainedModel""",
    ]
if TYPE_CHECKING:
    # Eager imports so type checkers see the real symbols.
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def SCREAMING_SNAKE_CASE__ ( tokenizer , line , max_length , padding_side , pad_to_max_length=True , return_tensors="pt" ):
    """Tokenize a single text line into a fixed-length batch of size 1.

    Args:
        tokenizer: a HF tokenizer (callable).
        line: the raw text line.
        max_length: truncation/padding length.
        padding_side: 'left' or 'right'; set on the tokenizer before encoding.
        pad_to_max_length: pad to `max_length` when True, no padding otherwise.
        return_tensors: framework for the returned tensors ('pt' by default).

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    # BART's byte-level BPE needs `add_prefix_space` unless the line already
    # starts with a space.
    extra_kw = {'add_prefix_space': True} if isinstance(tokenizer , BartTokenizer ) and not line.startswith(' ' ) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding='max_length' if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def SCREAMING_SNAKE_CASE__ ( input_ids , pad_token_id , attention_mask=None , ):
    """Drop columns that are padding in every row of a batch of token ids.

    A column is kept iff at least one row holds a non-pad token there; the
    optional attention mask is sliced with the same column mask.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.

    Returns:
        The trimmed ids, or a (ids, attention_mask) tuple when a mask is given.
    """
    keep_column_mask = input_ids.ne(pad_token_id ).any(dim=0 )
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class A_ ( SCREAMING_SNAKE_CASE ):
    """Line-aligned seq2seq dataset: parallel `<type_path>.source` /
    `<type_path>.target` files, read lazily via `linecache`.

    NOTE(review): machine renaming gave `__init__` duplicated parameter names
    (`SCREAMING_SNAKE_CASE__` repeated), which is a Python SyntaxError, and
    left many locals/attribute names unbound; the base class (upstream:
    `torch.utils.data.Dataset`) is also unbound here. Restore the upstream
    names before this class can run.
    """

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]="train" ,SCREAMING_SNAKE_CASE__ : Tuple=None ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : int=None ,SCREAMING_SNAKE_CASE__ : List[Any]="" ,):
        super().__init__()
        # Source/target file paths and the per-line char lengths of the source.
        __lowerCamelCase : Optional[Any] = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.source')
        __lowerCamelCase : Any = Path(SCREAMING_SNAKE_CASE__).joinpath(type_path + '.target')
        __lowerCamelCase : List[Any] = self.get_char_lens(self.src_file)
        __lowerCamelCase : List[Any] = max_source_length
        __lowerCamelCase : List[str] = max_target_length
        assert min(self.src_lens) > 0, F"found empty line in {self.src_file}"
        __lowerCamelCase : Any = tokenizer
        __lowerCamelCase : Optional[int] = prefix
        if n_obs is not None:
            # Optionally truncate the dataset to the first `n_obs` examples.
            __lowerCamelCase : Dict = self.src_lens[:n_obs]
        __lowerCamelCase : str = src_lang
        __lowerCamelCase : Any = tgt_lang

    def __len__( self : Tuple):
        return len(self.src_lens)

    def __getitem__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str):
        # `linecache` is 1-indexed while dataset indices are 0-indexed.
        __lowerCamelCase : Dict = index + 1 # linecache starts at 1
        __lowerCamelCase : Any = self.prefix + linecache.getline(str(self.src_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        __lowerCamelCase : int = linecache.getline(str(self.tgt_file) ,SCREAMING_SNAKE_CASE__).rstrip('\n')
        assert source_line, F"empty source line for index {index}"
        assert tgt_line, F"empty tgt line for index {index}"
        # Need to add eos token manually for T5
        if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token
        # Pad source and target to the right
        # RAG wraps two tokenizers: the question encoder for inputs and the
        # generator for targets; plain models use the same tokenizer for both.
        __lowerCamelCase : Dict = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        )
        __lowerCamelCase : Optional[int] = self.tokenizer.generator if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__) else self.tokenizer
        __lowerCamelCase : List[str] = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_source_length ,'right')
        __lowerCamelCase : Any = encode_line(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,self.max_target_length ,'right')
        __lowerCamelCase : List[Any] = source_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = target_inputs['input_ids'].squeeze()
        __lowerCamelCase : Tuple = source_inputs['attention_mask'].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def lowerCAmelCase ( SCREAMING_SNAKE_CASE__ : int):
        # Character length of every line in the file (upstream: get_char_lens).
        return [len(SCREAMING_SNAKE_CASE__) for x in Path(SCREAMING_SNAKE_CASE__).open().readlines()]

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any]):
        # Collate `__getitem__` dicts into padded batch tensors, trimming
        # columns that are padding in every row (upstream: collate_fn).
        # NOTE(review): this method reuses the name `lowerCAmelCase` and so
        # shadows the static method above — restore distinct names.
        __lowerCamelCase : Optional[Any] = torch.stack([x['input_ids'] for x in batch])
        __lowerCamelCase : Any = torch.stack([x['attention_mask'] for x in batch])
        __lowerCamelCase : Union[str, Any] = torch.stack([x['decoder_input_ids'] for x in batch])
        __lowerCamelCase : Optional[int] = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer ,SCREAMING_SNAKE_CASE__)
            else self.tokenizer.pad_token_id
        )
        __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase , __lowerCamelCase : int = trim_batch(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Union[str, Any] = {
            'input_ids': source_ids,
            'attention_mask': source_mask,
            'decoder_input_ids': y,
        }
        return batch
a =getLogger(__name__)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    """Flatten one level of nesting: a sequence of iterables -> one flat list."""
    return [element for inner in lowerCamelCase__ for element in inner]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> None:
    """Write the current git repository metadata to `<folder>/git_log.json`.

    Args:
        lowerCamelCase__: the destination folder path.
    """
    # The renamed original dropped the `repo_infos` local and wrote the
    # folder path itself into the JSON file; save the git info instead.
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(lowerCamelCase__ , 'git_log.json' ) )
def SCREAMING_SNAKE_CASE__ ( content , path , indent=4 , **json_dump_kwargs ) -> None:
    """Serialize `content` as JSON to the file at `path`.

    Args:
        content: any JSON-serializable object.
        path: destination file path (overwritten).
        indent: pretty-print indentation (default 4).
        **json_dump_kwargs: forwarded to `json.dump`.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    with open(path , 'w' ) as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> Any:
    """Read and deserialize the JSON file at path `lowerCamelCase__`."""
    with open(lowerCamelCase__ ) as f:
        # `json.load` must receive the open file handle, not the path string
        # (the renamed original passed the path, raising at runtime).
        return json.load(f )
def SCREAMING_SNAKE_CASE__ ( ):
    """Return id, sha, branch and hostname of the git repo enclosing the CWD.

    Returns:
        A dict with keys 'repo_id', 'repo_sha', 'repo_branch', 'hostname'.
    """
    # The renamed original passed an unbound name to `search_parent_directories`
    # and to `str(...)`; upstream uses True and the repo object respectively.
    repo = git.Repo(search_parent_directories=True )
    repo_infos = {
        'repo_id': str(repo ),
        'repo_sha': str(repo.head.object.hexsha ),
        'repo_branch': str(repo.active_branch ),
        'hostname': str(socket.gethostname() ),
    }
    return repo_infos
def SCREAMING_SNAKE_CASE__ ( fn , iterable ):
    """Eager `map`: apply `fn` to every item of `iterable`, returning a list.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    return list(map(fn , iterable ) )
def SCREAMING_SNAKE_CASE__ ( obj , path ):
    """Pickle `obj` to the file at `path`.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    with open(path , 'wb' ) as f:
        return pickle.dump(obj , f )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """SQuAD-style answer normalization: lower-case, strip punctuation,
    drop English articles (a/an/the) and collapse whitespace.

    The renamed original's inner helpers referenced the unbound name `text`
    while their parameter was renamed away; each helper now binds `text`.
    """

    def remove_articles(text ):
        return re.sub(R'\b(a|an|the)\b' , ' ' , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ):
    """Token-level F1 between a prediction and a reference (SQuAD metric).

    Both strings are normalized (via `normalize_answer`, defined above) and
    tokenized on whitespace; returns 0 when no token overlaps.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    pred_tokens = normalize_answer(prediction ).split()
    gold_tokens = normalize_answer(ground_truth ).split()
    # Multiset intersection counts tokens shared between the two answers.
    common = Counter(pred_tokens ) & Counter(gold_tokens )
    num_same = sum(common.values() )
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(pred_tokens )
    recall = 1.0 * num_same / len(gold_tokens )
    fa = (2 * precision * recall) / (precision + recall)
    return fa
def SCREAMING_SNAKE_CASE__ ( prediction , ground_truth ):
    """True iff prediction and reference are equal after SQuAD normalization.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    return normalize_answer(prediction ) == normalize_answer(ground_truth )
def SCREAMING_SNAKE_CASE__ ( output_lns , reference_lns ):
    """Average exact-match score over paired prediction/reference lines.

    Returns a dict `{"em": mean_exact_match}`; 0 for empty input.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError); upstream parameter names are restored here.
    """
    assert len(output_lns ) == len(reference_lns )
    em = 0
    for hypo, pred in zip(output_lns , reference_lns ):
        em += exact_match_score(hypo , pred )
    if len(output_lns ) > 0:
        em /= len(output_lns )
    return {"em": em}
def SCREAMING_SNAKE_CASE__ ( model_prefix ) -> bool:
    """True when the model prefix designates the RAG model family.

    The renamed original's body read `model_prefix` while the parameter was
    renamed away (NameError); the parameter is restored to match.
    """
    return model_prefix.startswith('rag' )
def SCREAMING_SNAKE_CASE__ ( extra_params , hparams , config ):
    """Transfer params listed in `extra_params` from `hparams` onto `config`.

    Each truthy attribute on `hparams` is moved to the equivalently named
    config attribute (with `dropout` mapping to T5's `dropout_rate`) and then
    removed from `hparams`; params the config does not know are dropped with
    a log message.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError) and lost the `equivalent_param["dropout"]` write; both are
    restored here.

    Returns:
        The (hparams, config) pair, mutated in place.
    """
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param['dropout'] = 'dropout_rate'
    for p in extra_params:
        if getattr(hparams , p , None ):
            if not hasattr(config , p ) and not hasattr(config , equivalent_param[p] ):
                logger.info('config doesn\'t have a `{}` attribute'.format(p ) )
                delattr(hparams , p )
                continue
            set_p = p if hasattr(config , p ) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p ) )
            delattr(hparams , p )
    return hparams, config
| 652 | 1 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> list[int]:
    """Map each lowercase letter to its 1-based alphabet position ('a' -> 1).

    The renamed original called `ord` on the whole string and iterated an
    unbound name `plain`; both now use the loop variable / parameter.
    """
    # `ord(elem) - 96` because ord('a') == 97.
    return [ord(elem ) - 9_6 for elem in lowerCamelCase__]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> str:
    """Inverse of encode: map 1-based alphabet positions back to letters.

    The renamed original iterated the unbound name `encoded`; it now iterates
    the parameter.
    """
    return "".join(chr(elem + 9_6 ) for elem in lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Prompt for a string, then print its encoded and round-tripped forms.

    NOTE(review): `encode` / `decode` are the upstream names of the two
    helpers defined above (renamed here by the obfuscation).
    """
    # The renamed original printed an unbound name; keep the result in a
    # real local instead.
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
if __name__ == "__main__":
    # `main` does not exist under that name in this file; call the entry
    # point by its actual (renamed) identifier.
    SCREAMING_SNAKE_CASE__()
| 652 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
a =logging.get_logger(__name__)
# General docstring
a ="""MobileNetV1Config"""
# Base docstring
a ="""google/mobilenet_v1_1.0_224"""
a =[1, 1024, 7, 7]
# Image classification docstring
a ="""google/mobilenet_v1_1.0_224"""
a ="""tabby, tabby cat"""
a =[
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=None ) -> str:
    """Build the TF-variable-name -> PyTorch-parameter map for MobileNetV1.

    NOTE(review): machine renaming duplicated the parameter name (a Python
    SyntaxError) and rewrote the upstream `tf_to_pt_map["..."] = param`
    entries as anonymous local assignments, so the returned map would stay
    empty; several names (`model`, `backbone`, `pt_index`, `tf_index`,
    `pointer`, `tf_to_pt_map`) are unbound. Restore from the upstream
    conversion helper before use.
    """
    __lowerCamelCase : str = {}
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        __lowerCamelCase : int = model.mobilenet_va
    else:
        __lowerCamelCase : List[str] = model
    __lowerCamelCase : List[Any] = 'MobilenetV1/Conv2d_0/'
    # Stem convolution + batch-norm parameters.
    __lowerCamelCase : List[Any] = backbone.conv_stem.convolution.weight
    __lowerCamelCase : List[str] = backbone.conv_stem.normalization.bias
    __lowerCamelCase : Tuple = backbone.conv_stem.normalization.weight
    __lowerCamelCase : Union[str, Any] = backbone.conv_stem.normalization.running_mean
    __lowerCamelCase : Optional[int] = backbone.conv_stem.normalization.running_var
    # Thirteen depthwise-separable blocks: each maps to one depthwise and one
    # pointwise TF scope (two PyTorch layers per block).
    for i in range(1_3 ):
        __lowerCamelCase : Any = i + 1
        __lowerCamelCase : Union[str, Any] = i * 2
        __lowerCamelCase : Optional[Any] = backbone.layer[pt_index]
        __lowerCamelCase : Optional[int] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
        __lowerCamelCase : Tuple = pointer.convolution.weight
        __lowerCamelCase : Optional[Any] = pointer.normalization.bias
        __lowerCamelCase : Union[str, Any] = pointer.normalization.weight
        __lowerCamelCase : List[str] = pointer.normalization.running_mean
        __lowerCamelCase : Union[str, Any] = pointer.normalization.running_var
        __lowerCamelCase : int = backbone.layer[pt_index + 1]
        __lowerCamelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
        __lowerCamelCase : Optional[Any] = pointer.convolution.weight
        __lowerCamelCase : Any = pointer.normalization.bias
        __lowerCamelCase : str = pointer.normalization.weight
        __lowerCamelCase : Dict = pointer.normalization.running_mean
        __lowerCamelCase : List[str] = pointer.normalization.running_var
    if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
        # Classification head (only for the classifier wrapper model).
        __lowerCamelCase : Union[str, Any] = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
        __lowerCamelCase : Any = model.classifier.weight
        __lowerCamelCase : int = model.classifier.bias
    return tf_to_pt_map
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
    """Load TF-checkpoint weights into a PyTorch MobileNetV1 model.

    NOTE(review): machine renaming duplicated the parameter name (a Python
    SyntaxError; upstream signature is `(model, config, tf_checkpoint_path)`)
    and left several locals (`init_vars`, `tf_weights`, `array`,
    `tf_to_pt_map`) unbound — restore before use.
    """
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    __lowerCamelCase : List[str] = tf.train.list_variables(lowerCamelCase__ )
    __lowerCamelCase : List[str] = {}
    for name, shape in init_vars:
        logger.info(F"Loading TF weight {name} with shape {shape}" )
        __lowerCamelCase : Any = tf.train.load_variable(lowerCamelCase__ , lowerCamelCase__ )
        __lowerCamelCase : List[Any] = array
    # Build TF to PyTorch weights loading map
    __lowerCamelCase : Tuple = _build_tf_to_pytorch_map(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F"Importing {name}" )
        if name not in tf_weights:
            logger.info(F"{name} not in tf pre-trained weights, skipping" )
            continue
        __lowerCamelCase : Optional[int] = tf_weights[name]
        if "depthwise_weights" in name:
            # TF depthwise kernels store the channel multiplier last; move the
            # spatial dims behind (out, in) for PyTorch.
            logger.info('Transposing depthwise' )
            __lowerCamelCase : List[str] = np.transpose(lowerCamelCase__ , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2: # copying into linear layer
                __lowerCamelCase : Any = array.squeeze().transpose()
            else:
                # HWIO (TF) -> OIHW (PyTorch) for regular conv kernels.
                __lowerCamelCase : Tuple = np.transpose(lowerCamelCase__ , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
        logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
        __lowerCamelCase : Optional[Any] = torch.from_numpy(lowerCamelCase__ )
        # Drop the consumed variable and its optimizer/EMA companions so the
        # closing log lists only weights that were never copied.
        tf_weights.pop(lowerCamelCase__ , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp' , lowerCamelCase__ )
        tf_weights.pop(name + '/RMSProp_1' , lowerCamelCase__ )
        tf_weights.pop(name + '/ExponentialMovingAverage' , lowerCamelCase__ )
    logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
    return model
def SCREAMING_SNAKE_CASE__ ( features , conv_layer ) -> torch.Tensor:
    """Pad `features` the way TensorFlow's SAME padding would for `conv_layer`.

    TF pads asymmetrically (the extra pixel goes to the bottom/right) based on
    how the input size interacts with the stride, whereas PyTorch pads
    symmetrically; this applies the TF scheme explicitly before the conv.

    The machine-renamed original duplicated the parameter name (a Python
    SyntaxError) and lost every local name; upstream names are restored.

    Args:
        features: input tensor whose last two dims are (height, width).
        conv_layer: a Conv2d whose `stride` and `kernel_size` tuples are read.
    """
    in_height, in_width = features.shape[-2:]
    stride_height, stride_width = conv_layer.stride
    kernel_height, kernel_width = conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height = max(kernel_height - stride_height , 0 )
    else:
        pad_along_height = max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width = max(kernel_width - stride_width , 0 )
    else:
        pad_along_width = max(kernel_width - (in_width % stride_width) , 0 )
    # Split each total pad, giving the odd extra pixel to the right/bottom.
    pad_left = pad_along_width // 2
    pad_right = pad_along_width - pad_left
    pad_top = pad_along_height // 2
    pad_bottom = pad_along_height - pad_top
    padding = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
class A_ ( nn.Module ):
    """Conv2d -> optional BatchNorm2d -> optional activation building block
    used throughout MobileNetV1.

    NOTE(review): `__init__` has duplicated parameter names (a Python
    SyntaxError from machine renaming); upstream signature is
    `(config, in_channels, out_channels, kernel_size, stride=1, groups=1,
    bias=False, use_normalization=True, use_activation=True)`, and several
    names below (`in_channels`, `groups`, `use_normalization`,
    `use_activation`, `features`) are unbound in this renamed form.
    """

    def __init__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : Optional[int] = 1 ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : Optional[bool] = True ,SCREAMING_SNAKE_CASE__ : Optional[bool or str] = True ,):
        super().__init__()
        __lowerCamelCase : Dict = config
        if in_channels % groups != 0:
            raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups.")
        if out_channels % groups != 0:
            raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups.")
        # When TF-style SAME padding is emulated at runtime, the conv itself
        # uses zero padding; otherwise use symmetric "same"-like padding.
        __lowerCamelCase : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2)
        __lowerCamelCase : Optional[int] = nn.Convad(
            in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,groups=SCREAMING_SNAKE_CASE__ ,bias=SCREAMING_SNAKE_CASE__ ,padding_mode='zeros' ,)
        if use_normalization:
            __lowerCamelCase : Optional[int] = nn.BatchNormad(
                num_features=SCREAMING_SNAKE_CASE__ ,eps=config.layer_norm_eps ,momentum=0.9997 ,affine=SCREAMING_SNAKE_CASE__ ,track_running_stats=SCREAMING_SNAKE_CASE__ ,)
        else:
            __lowerCamelCase : Dict = None
        if use_activation:
            # Accept an explicit activation name, else fall back to the
            # config-level hidden activation (name or callable).
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                __lowerCamelCase : Dict = ACTaFN[use_activation]
            elif isinstance(config.hidden_act ,SCREAMING_SNAKE_CASE__):
                __lowerCamelCase : str = ACTaFN[config.hidden_act]
            else:
                __lowerCamelCase : List[str] = config.hidden_act
        else:
            __lowerCamelCase : List[str] = None

    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : torch.Tensor):
        # Forward: optional TF padding, convolution, then optional norm and
        # activation.
        if self.config.tf_padding:
            __lowerCamelCase : Any = apply_tf_padding(SCREAMING_SNAKE_CASE__ ,self.convolution)
        __lowerCamelCase : Optional[int] = self.convolution(SCREAMING_SNAKE_CASE__)
        if self.normalization is not None:
            __lowerCamelCase : Dict = self.normalization(SCREAMING_SNAKE_CASE__)
        if self.activation is not None:
            __lowerCamelCase : List[str] = self.activation(SCREAMING_SNAKE_CASE__)
        return features
class A_ ( SCREAMING_SNAKE_CASE ):
    """Base class wiring MobileNetV1 into the library's pretrained-model
    machinery: config class, TF-weight loader hook, and weight init.

    NOTE(review): `load_tf_weights_in_mobilenet_va` is the upstream name of
    the TF loader defined above (renamed here), so this reference is unbound.
    """

    _UpperCAmelCase : Union[str, Any] = MobileNetVaConfig
    _UpperCAmelCase : List[str] = load_tf_weights_in_mobilenet_va
    _UpperCAmelCase : List[str] = '''mobilenet_v1'''
    _UpperCAmelCase : Any = '''pixel_values'''
    _UpperCAmelCase : int = False

    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Union[nn.Linear, nn.Convad]):
        # Standard init: truncated-normal for conv/linear weights (zero bias),
        # identity transform for batch norm.
        if isinstance(SCREAMING_SNAKE_CASE__ ,(nn.Linear, nn.Convad)):
            module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(SCREAMING_SNAKE_CASE__ ,nn.BatchNormad):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
a =r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    """MobileNetV1 backbone: a stem convolution followed by 13
    depthwise-separable blocks and an optional global average pool.

    NOTE(review): `__init__` has duplicated parameter names (a Python
    SyntaxError from machine renaming); upstream signature is
    `(config, add_pooling_layer=True)`, and several names below (`depth`,
    `out_channels`, `strides`, `add_pooling_layer`, the forward locals) are
    unbound in this renamed form.
    """

    def __init__( self : int ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig ,SCREAMING_SNAKE_CASE__ : bool = True):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = config
        # Stem channel count before depth-multiplier scaling.
        __lowerCamelCase : Optional[int] = 3_2
        __lowerCamelCase : List[str] = max(int(depth * config.depth_multiplier) ,config.min_depth)
        __lowerCamelCase : Optional[Any] = MobileNetVaConvLayer(
            SCREAMING_SNAKE_CASE__ ,in_channels=config.num_channels ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=2 ,)
        # Stride pattern of the 13 depthwise stages; stride-2 stages double
        # the channel depth.
        __lowerCamelCase : Any = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
        __lowerCamelCase : str = nn.ModuleList()
        for i in range(1_3):
            __lowerCamelCase : str = out_channels
            if strides[i] == 2 or i == 0:
                depth *= 2
                __lowerCamelCase : str = max(int(depth * config.depth_multiplier) ,config.min_depth)
            # Depthwise 3x3 followed by pointwise 1x1 convolution.
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=3 ,stride=strides[i] ,groups=SCREAMING_SNAKE_CASE__ ,))
            self.layer.append(
                MobileNetVaConvLayer(
                    SCREAMING_SNAKE_CASE__ ,in_channels=SCREAMING_SNAKE_CASE__ ,out_channels=SCREAMING_SNAKE_CASE__ ,kernel_size=1 ,))
        __lowerCamelCase : Optional[int] = nn.AdaptiveAvgPoolad((1, 1)) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def lowerCAmelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        # Attention-head pruning does not apply to a pure conv network.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward: stem -> the 26 conv layers -> optional pooled output,
        # returned as a tuple or a model-output dataclass.
        __lowerCamelCase : int = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        __lowerCamelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError('You have to specify pixel_values')
        __lowerCamelCase : Optional[Any] = self.conv_stem(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = () if output_hidden_states else None
        for i, layer_module in enumerate(self.layer):
            __lowerCamelCase : Dict = layer_module(SCREAMING_SNAKE_CASE__)
            if output_hidden_states:
                __lowerCamelCase : Any = all_hidden_states + (hidden_states,)
        __lowerCamelCase : Optional[Any] = hidden_states
        if self.pooler is not None:
            __lowerCamelCase : Tuple = torch.flatten(self.pooler(SCREAMING_SNAKE_CASE__) ,start_dim=1)
        else:
            __lowerCamelCase : List[str] = None
        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=SCREAMING_SNAKE_CASE__ ,pooler_output=SCREAMING_SNAKE_CASE__ ,hidden_states=SCREAMING_SNAKE_CASE__ ,)
@add_start_docstrings(
    '''
    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    """MobileNetV1 backbone plus a dropout + linear classification head.

    NOTE(review): the forward method below has duplicated parameter names (a
    Python SyntaxError from machine renaming); upstream signature is
    `(pixel_values=None, output_hidden_states=None, labels=None,
    return_dict=None)`, and several locals (`return_dict`, `outputs`,
    `logits`, `labels`, `loss`, `loss_fct`, `output`) are unbound here.
    """

    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : MobileNetVaConfig):
        super().__init__(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = config.num_labels
        __lowerCamelCase : Optional[Any] = MobileNetVaModel(SCREAMING_SNAKE_CASE__)
        # The head's input size is the channel count of the last backbone conv.
        __lowerCamelCase : List[str] = self.mobilenet_va.layer[-1].convolution.out_channels
        # Classifier head
        __lowerCamelCase : Any = nn.Dropout(config.classifier_dropout_prob ,inplace=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ ,config.num_labels) if config.num_labels > 0 else nn.Identity()
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self : str ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : Optional[torch.Tensor] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,):
        # Forward: backbone -> pooled output -> dropout -> linear head; when
        # labels are given, pick the loss by the configured problem type.
        __lowerCamelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
        __lowerCamelCase : Optional[int] = self.mobilenet_va(SCREAMING_SNAKE_CASE__ ,output_hidden_states=SCREAMING_SNAKE_CASE__ ,return_dict=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = outputs.pooler_output if return_dict else outputs[1]
        __lowerCamelCase : List[str] = self.classifier(self.dropout(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : List[str] = None
        if labels is not None:
            # Infer the problem type once from the label dtype / label count.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    __lowerCamelCase : Dict = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    __lowerCamelCase : int = 'single_label_classification'
                else:
                    __lowerCamelCase : Tuple = 'multi_label_classification'
            if self.config.problem_type == "regression":
                __lowerCamelCase : Tuple = MSELoss()
                if self.num_labels == 1:
                    __lowerCamelCase : int = loss_fct(logits.squeeze() ,labels.squeeze())
                else:
                    __lowerCamelCase : Union[str, Any] = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
            elif self.config.problem_type == "single_label_classification":
                __lowerCamelCase : List[str] = CrossEntropyLoss()
                __lowerCamelCase : List[str] = loss_fct(logits.view(-1 ,self.num_labels) ,labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                __lowerCamelCase : int = BCEWithLogitsLoss()
                __lowerCamelCase : int = loss_fct(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
        if not return_dict:
            __lowerCamelCase : List[str] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(
            loss=SCREAMING_SNAKE_CASE__ ,logits=SCREAMING_SNAKE_CASE__ ,hidden_states=outputs.hidden_states ,)
| 652 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
a =False
class A_ ( unittest.TestCase ):
    """Checks that a short training loop is reproducible: the same seeded
    model/optimizer trained on DDPM-noised vs DDIM-noised batches should end
    with matching losses.

    NOTE(review): machine renaming dropped the variables that upstream
    collects and compares (the DDPM vs DDIM noisy images / losses); the
    closing `allclose` asserts compare a placeholder name to itself, and
    `model`/`optimizer`/`loss` etc. are unbound — restore the upstream names
    before trusting this test.
    """

    def lowerCAmelCase ( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple=3_2):
        # Build a freshly seeded tiny UNet and an SGD optimizer for it.
        set_seed(0)
        __lowerCamelCase : Tuple = UNetaDModel(sample_size=SCREAMING_SNAKE_CASE__ ,in_channels=3 ,out_channels=3)
        __lowerCamelCase : str = torch.optim.SGD(model.parameters() ,lr=0.0001)
        return model, optimizer

    @slow
    def lowerCAmelCase ( self : int):
        __lowerCamelCase : List[Any] = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        # Two schedulers with identical noise schedules.
        __lowerCamelCase : Dict = DDPMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_start=0.0001 ,beta_end=0.02 ,beta_schedule='linear' ,clip_sample=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Union[str, Any] = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_start=0.0001 ,beta_end=0.02 ,beta_schedule='linear' ,clip_sample=SCREAMING_SNAKE_CASE__ ,)
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
        # shared batches for DDPM and DDIM
        set_seed(0)
        __lowerCamelCase : Optional[int] = [torch.randn((4, 3, 3_2, 3_2)).clip(-1 ,1).to(SCREAMING_SNAKE_CASE__) for _ in range(4)]
        __lowerCamelCase : int = [torch.randn((4, 3, 3_2, 3_2)).to(SCREAMING_SNAKE_CASE__) for _ in range(4)]
        __lowerCamelCase : str = [torch.randint(0 ,1_0_0_0 ,(4,)).long().to(SCREAMING_SNAKE_CASE__) for _ in range(4)]
        # train with a DDPM scheduler
        __lowerCamelCase , __lowerCamelCase : str = self.get_model_optimizer(resolution=3_2)
        model.train().to(SCREAMING_SNAKE_CASE__)
        for i in range(4):
            optimizer.zero_grad()
            __lowerCamelCase : List[str] = ddpm_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i])
            __lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE__ ,timesteps[i]).sample
            __lowerCamelCase : List[Any] = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ ,noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        # recreate the model and optimizer, and retry with DDIM
        __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_model_optimizer(resolution=3_2)
        model.train().to(SCREAMING_SNAKE_CASE__)
        for i in range(4):
            optimizer.zero_grad()
            __lowerCamelCase : Dict = ddim_scheduler.add_noise(clean_images[i] ,noise[i] ,timesteps[i])
            __lowerCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE__ ,timesteps[i]).sample
            __lowerCamelCase : str = torch.nn.functional.mse_loss(SCREAMING_SNAKE_CASE__ ,noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-5))
        self.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,atol=1E-5))
| 652 |
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> np.ndarray:
    """Warp an image with the affine transform defined by two point triples.

    NOTE(review): automated renaming broke this signature — every parameter is
    named ``lowerCamelCase__`` (duplicate argument names are a SyntaxError) and
    the body reads ``rows``/``cols`` which are never bound.  Presumably the
    intended signature was ``(img, pt1, pt2, rows, cols)`` — TODO confirm
    against the original source.
    """
    # 2x3 affine matrix mapping the first point triple onto the second
    __lowerCamelCase : Optional[Any] = cva.getAffineTransform(lowerCamelCase__ , lowerCamelCase__ )
    # apply the transform; the output canvas is (rows, cols)
    return cva.warpAffine(lowerCamelCase__ , lowerCamelCase__ , (rows, cols) )
if __name__ == "__main__":
    # Demo: show the grayscale Lena image next to three affine "rotations".
    # NOTE(review): name mangling collapsed every binding below to ``a`` — each
    # assignment overwrites the previous one, and the later references
    # (``image``, ``gray_img``, ``ptsa``, ``img_rows``, ``img_cols``,
    # ``get_rotation``, ``images``, ``titles``) are unbound as written.
    # TODO: restore the original distinct names.
    # read original image
    a =cva.imread(
        str(Path(__file__).resolve().parent.parent / """image_data""" / """lena.jpg""")
    )
    # turn image in gray scale value
    a =cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    a , a =gray_img.shape
    # set different points to rotate image
    a =np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    a =np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    a =np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    a =np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    a =[
        gray_img,
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
        get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
    ]
    # plot different image rotations
    a =plt.figure(1)
    a =["""Original""", """Rotation 1""", """Rotation 2""", """Rotation 3"""]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
        plt.title(titles[i])
        plt.axis("""off""")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 652 | 1 |
# Month doomsday anchors (index 0 = January) for leap and common years.
# NOTE: automated renaming had collapsed all three module constants to ``a``,
# leaving the function's lookups (DOOMSDAY_LEAP / DOOMSDAY_NOT_LEAP /
# WEEK_DAY_NAMES) unresolved — the distinct names are restored here.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: "Sunday",
    1: "Monday",
    2: "Tuesday",
    3: "Wednesday",
    4: "Thursday",
    5: "Friday",
    6: "Saturday",
}


def SCREAMING_SNAKE_CASE__(year: int, month: int, day: int) -> str:
    """Return the weekday name of a Gregorian date via Conway's Doomsday rule.

    Fixes vs. the previous revision:
      * parameters restored to ``year``/``month``/``day`` (the mangled version
        declared three duplicate ``lowerCamelCase__`` parameters — a
        SyntaxError — while the body read unbound names);
      * the century leap-year test used ``year % 400 == 0`` where it must be
        ``!= 0``: a century year (centurian == 0) is a *common* year exactly
        when it is NOT divisible by 400 (1900 common, 2000 leap).

    Raises AssertionError for out-of-range inputs (kept from the original).
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    # weekday of this year's "doomsday" (e.g. 4/4, 6/6, 8/8 all fall on it)
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # common year: not divisible by 4, or a century year not divisible by 400
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 652 |
import math
def SCREAMING_SNAKE_CASE__(number: int) -> int:
    """Return the ``number``-th Proth number (3, 5, 9, 13, 17, 25, ...).

    Proth numbers have the form k * 2**n + 1 with odd k < 2**n.

    Fixes vs. the previous revision (name-mangling damage):
      * the type check was ``isinstance(x, x)`` — now ``isinstance(number, int)``;
      * the error messages interpolated an unbound ``number``;
      * the block loop iterated ``range(1, <input>)`` and ``range(<input>)``
        instead of ``range(1, block_index)`` / ``range(increment)``.

    :param number: 1-based index of the Proth number to return (must be >= 1).
    :raises TypeError: if ``number`` is not an int.
    :raises ValueError: if ``number`` < 1.
    """
    if not isinstance(number, int):
        raise TypeError(f"Input value of [number={number}] must be an integer")
    if number < 1:
        raise ValueError(f"Input value of [number={number}] must be > 0")
    if number == 1:
        return 3
    if number == 2:
        return 5
    # Proth numbers come in "blocks" of 3 * 2**(block-1) values sharing the
    # same power of two; the requested index falls inside block ``block_index``.
    block_index = int(math.log(number // 3, 2)) + 2
    proth_list = [3, 5]
    proth_index = 2
    increment = 3
    for block in range(1, block_index):
        for _ in range(increment):
            proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
            proth_index += 1
        increment *= 2
    return proth_list[number - 1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Print the first few Proth numbers (index 0 deliberately triggers the
    # ValueError branch).  Fixes vs. the previous revision: the loop called an
    # undefined ``proth`` (the function in this module is named
    # ``SCREAMING_SNAKE_CASE__``) and printed an unbound ``value``.
    for number in range(11):
        value = 0
        try:
            value = SCREAMING_SNAKE_CASE__(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
| 652 | 1 |
from string import ascii_uppercase
# Digit symbols for bases > 10: maps "10" -> "A", ..., "35" -> "Z".
# NOTE: this constant had been mangled to ``a``, leaving the function's
# ``ALPHABET_VALUES`` lookup unresolved — the real name is restored here.
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def SCREAMING_SNAKE_CASE__(num: int, base: int) -> str:
    """Convert a non-negative decimal integer ``num`` to a string in ``base``.

    Fixes vs. the previous revision (name-mangling damage): the signature
    declared two duplicate ``lowerCamelCase__`` parameters (a SyntaxError)
    while the body read unbound ``num``/``base``, and all three ``isinstance``
    guards compared a value against itself; the guards below restore the
    original checks matching their error messages.

    :param num: value to convert; must be a non-negative int.
    :param base: target base, 2..36 inclusive.
    :raises TypeError: if ``num`` or ``base`` is a float, or ``base`` is a str.
    :raises ValueError: if ``num`` < 0 or ``base`` is outside 2..36.
    """
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 are rendered as letters A..Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            # digits were accumulated least-significant first — reverse them
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Round-trip self-check: int() must invert the conversion for every base.
    # Fix vs. the previous revision: the loop called an undefined
    # ``decimal_to_any`` — the converter in this module is (mangled-)named
    # ``SCREAMING_SNAKE_CASE__``.
    for base in range(2, 37):
        for num in range(1000):
            assert int(SCREAMING_SNAKE_CASE__(num, base), base) == num, (
                num,
                base,
                SCREAMING_SNAKE_CASE__(num, base),
                int(SCREAMING_SNAKE_CASE__(num, base), base),
            )
| 652 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetaImageProcessor
class A_ ( unittest.TestCase ):
    """Settings holder for the DetaImageProcessor tests below.

    Builds the image-processor kwargs dict and computes the expected
    post-resize height/width for PIL/array inputs.

    NOTE(review): automated renaming broke this class — ``__init__`` declares
    every parameter as ``SCREAMING_SNAKE_CASE__`` (duplicate argument names are
    a SyntaxError), the bodies read names (``parent``, ``batch_size``,
    ``size``, ``image``, ``item``, ...) that are never bound, and attribute
    assignments target ``__lowerCamelCase`` instead of ``self.<attr>``.
    The original distinct names need to be restored before this can run.
    """

    def __init__( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str=7 ,SCREAMING_SNAKE_CASE__ : Any=3 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=3_0 ,SCREAMING_SNAKE_CASE__ : int=4_0_0 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=None ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : Dict=[0.5, 0.5, 0.5] ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : List[str]=1 / 2_5_5 ,SCREAMING_SNAKE_CASE__ : Tuple=True ,):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        __lowerCamelCase : List[Any] = size if size is not None else {'shortest_edge': 1_8, 'longest_edge': 1_3_3_3}
        __lowerCamelCase : str = parent
        __lowerCamelCase : Union[str, Any] = batch_size
        __lowerCamelCase : int = num_channels
        __lowerCamelCase : Dict = min_resolution
        __lowerCamelCase : Tuple = max_resolution
        __lowerCamelCase : Dict = do_resize
        __lowerCamelCase : List[Any] = size
        __lowerCamelCase : Tuple = do_normalize
        __lowerCamelCase : Any = image_mean
        __lowerCamelCase : List[str] = image_std
        __lowerCamelCase : List[Any] = do_rescale
        __lowerCamelCase : str = rescale_factor
        __lowerCamelCase : Tuple = do_pad

    def lowerCAmelCase ( self : Dict):
        """Return the kwargs dict used to construct a DetaImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : List[str]=False):
        """Compute the (height, width) the processor is expected to resize to.

        For a batch, returns the max height/width over the per-image values.
        """
        if not batched:
            __lowerCamelCase : Optional[Any] = image_inputs[0]
            if isinstance(SCREAMING_SNAKE_CASE__ ,Image.Image):
                __lowerCamelCase , __lowerCamelCase : Any = image.size
            else:
                __lowerCamelCase , __lowerCamelCase : Any = image.shape[1], image.shape[2]
            # scale the short side to size["shortest_edge"], keep aspect ratio
            if w < h:
                __lowerCamelCase : Optional[int] = int(self.size['shortest_edge'] * h / w)
                __lowerCamelCase : Tuple = self.size['shortest_edge']
            elif w > h:
                __lowerCamelCase : Union[str, Any] = self.size['shortest_edge']
                __lowerCamelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h)
            else:
                __lowerCamelCase : List[Any] = self.size['shortest_edge']
                __lowerCamelCase : Optional[int] = self.size['shortest_edge']
        else:
            __lowerCamelCase : List[str] = []
            for image in image_inputs:
                __lowerCamelCase , __lowerCamelCase : List[Any] = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            # batch output is padded to the largest height/width in the batch
            __lowerCamelCase : Tuple = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[0])[0]
            __lowerCamelCase : Dict = max(SCREAMING_SNAKE_CASE__ ,key=lambda SCREAMING_SNAKE_CASE__: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Tests for DetaImageProcessor: attributes, resizing of PIL/numpy/torch
    inputs, and (slow) COCO detection / panoptic annotation encoding.

    NOTE(review): mangling artifacts — the base class ``SCREAMING_SNAKE_CASE``
    and ``DetaImageProcessingTester`` are undefined in this file, every method
    is named ``lowerCAmelCase`` (later defs shadow earlier ones), and results
    are bound to ``__lowerCamelCase`` instead of real locals, so names such as
    ``image_processor`` / ``encoding`` below are unbound.  Original names need
    restoring before this suite can run.
    """

    _UpperCAmelCase : Optional[int] = DetaImageProcessor if is_vision_available() else None

    def lowerCAmelCase ( self : Optional[Any]):
        # builds the settings holder defined above
        __lowerCamelCase : List[str] = DetaImageProcessingTester(self)

    @property
    def lowerCAmelCase ( self : Any):
        return self.image_processor_tester.prepare_image_processor_dict()

    def lowerCAmelCase ( self : Dict):
        """Processor exposes all expected configuration attributes."""
        __lowerCamelCase : Any = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_mean'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'image_std'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_normalize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_resize'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_rescale'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'do_pad'))
        self.assertTrue(hasattr(SCREAMING_SNAKE_CASE__ ,'size'))

    def lowerCAmelCase ( self : str):
        """from_dict round-trips the size/do_pad settings."""
        __lowerCamelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size ,{'shortest_edge': 1_8, 'longest_edge': 1_3_3_3})
        self.assertEqual(image_processor.do_pad ,SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Any):
        pass

    def lowerCAmelCase ( self : List[str]):
        """PIL inputs are resized to the expected height/width (single + batch)."""
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,Image.Image)
        # Test not batched input
        __lowerCamelCase : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Tuple = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : str):
        """numpy inputs are resized to the expected height/width (single + batch)."""
        # Initialize image_processing
        __lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        __lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,numpify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,np.ndarray)
        # Test not batched input
        __lowerCamelCase : Tuple = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : str = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    def lowerCAmelCase ( self : int):
        """torch tensor inputs are resized to the expected height/width (single + batch)."""
        # Initialize image_processing
        __lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        __lowerCamelCase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=SCREAMING_SNAKE_CASE__ ,torchify=SCREAMING_SNAKE_CASE__)
        for image in image_inputs:
            self.assertIsInstance(SCREAMING_SNAKE_CASE__ ,torch.Tensor)
        # Test not batched input
        __lowerCamelCase : Union[str, Any] = image_processing(image_inputs[0] ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : int = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
        # Test batched
        __lowerCamelCase : List[Any] = image_processing(SCREAMING_SNAKE_CASE__ ,return_tensors='pt').pixel_values
        __lowerCamelCase , __lowerCamelCase : Optional[int] = self.image_processor_tester.get_expected_values(SCREAMING_SNAKE_CASE__ ,batched=SCREAMING_SNAKE_CASE__)
        self.assertEqual(
            encoded_images.shape ,(
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) ,)

    @slow
    def lowerCAmelCase ( self : Optional[Any]):
        """COCO detection annotations are encoded into the expected tensors."""
        # prepare image and target
        __lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r') as f:
            __lowerCamelCase : List[str] = json.loads(f.read())
        __lowerCamelCase : Union[str, Any] = {'image_id': 3_9_7_6_9, 'annotations': target}
        # encode them
        __lowerCamelCase : Optional[int] = DetaImageProcessor()
        __lowerCamelCase : int = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : int = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[int] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : Tuple = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Optional[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : List[Any] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify orig_size
        __lowerCamelCase : str = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : int = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))

    @slow
    def lowerCAmelCase ( self : str):
        """COCO panoptic annotations (with masks) are encoded into the expected tensors."""
        # prepare image, target and masks_path
        __lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r') as f:
            __lowerCamelCase : Tuple = json.loads(f.read())
        __lowerCamelCase : List[Any] = {'file_name': '000000039769.png', 'image_id': 3_9_7_6_9, 'segments_info': target}
        __lowerCamelCase : Optional[int] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')
        # encode them
        __lowerCamelCase : List[str] = DetaImageProcessor(format='coco_panoptic')
        __lowerCamelCase : Optional[Any] = image_processing(images=SCREAMING_SNAKE_CASE__ ,annotations=SCREAMING_SNAKE_CASE__ ,masks_path=SCREAMING_SNAKE_CASE__ ,return_tensors='pt')
        # verify pixel values
        __lowerCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6])
        self.assertEqual(encoding['pixel_values'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,SCREAMING_SNAKE_CASE__ ,atol=1E-4))
        # verify area
        __lowerCamelCase : Optional[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,SCREAMING_SNAKE_CASE__))
        # verify boxes
        __lowerCamelCase : Tuple = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape ,SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,SCREAMING_SNAKE_CASE__ ,atol=1E-3))
        # verify image_id
        __lowerCamelCase : int = torch.tensor([3_9_7_6_9])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,SCREAMING_SNAKE_CASE__))
        # verify is_crowd
        __lowerCamelCase : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,SCREAMING_SNAKE_CASE__))
        # verify class_labels
        __lowerCamelCase : int = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,SCREAMING_SNAKE_CASE__))
        # verify masks
        __lowerCamelCase : Optional[Any] = 8_2_2_8_7_3
        self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,SCREAMING_SNAKE_CASE__)
        # verify orig_size
        __lowerCamelCase : Any = torch.tensor([4_8_0, 6_4_0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,SCREAMING_SNAKE_CASE__))
        # verify size
        __lowerCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,SCREAMING_SNAKE_CASE__))
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import table for the Reformer model package: submodules are only
# imported on first attribute access (except under TYPE_CHECKING).
# NOTE(review): name mangling collapsed the module-level bindings to ``a`` —
# originally this was a single ``_import_structure`` dict extended per
# optional dependency; as written, each ``a = [...]`` below overwrites the
# dict, and the final ``_LazyModule(...)`` call references an unbound
# ``_import_structure``.  Restore the original names before use.
a ={"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # slow (sentencepiece-based) tokenizer
    a =["""ReformerTokenizer"""]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # fast (tokenizers-based) tokenizer
    a =["""ReformerTokenizerFast"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch model classes
    a =[
        """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ReformerAttention""",
        """ReformerForMaskedLM""",
        """ReformerForQuestionAnswering""",
        """ReformerForSequenceClassification""",
        """ReformerLayer""",
        """ReformerModel""",
        """ReformerModelWithLMHead""",
        """ReformerPreTrainedModel""",
    ]

if TYPE_CHECKING:
    # static analyzers get real imports instead of the lazy module
    from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer import ReformerTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_reformer_fast import ReformerTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_reformer import (
            REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            ReformerAttention,
            ReformerForMaskedLM,
            ReformerForQuestionAnswering,
            ReformerForSequenceClassification,
            ReformerLayer,
            ReformerModel,
            ReformerModelWithLMHead,
            ReformerPreTrainedModel,
        )

else:
    import sys

    # replace this module with a lazy proxy in sys.modules
    a =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 652 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
# force deterministic torch/cuDNN kernels so pipeline outputs are reproducible
enable_full_determinism()
class A_ ( SCREAMING_SNAKE_CASE , unittest.TestCase ):
    """Fast CPU test for KandinskyVaaControlnetPipeline with tiny dummy models.

    NOTE(review): mangling artifacts — the mixin base ``SCREAMING_SNAKE_CASE``
    is undefined here (presumably PipelineTesterMixin), every class attribute
    is named ``_UpperCAmelCase`` (later assignments shadow earlier ones, so the
    required/callback/batch param lists collapse into one attribute), the
    properties/methods share the name ``lowerCAmelCase``, and results are
    bound to ``__lowerCamelCase`` so names like ``unet``/``pipe``/``image``
    below are unbound.  Restore the original names before running.
    """

    _UpperCAmelCase : Optional[Any] = KandinskyVaaControlnetPipeline
    _UpperCAmelCase : Optional[Any] = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : int = ['''image_embeds''', '''negative_image_embeds''', '''hint''']
    _UpperCAmelCase : List[Any] = [
        '''generator''',
        '''height''',
        '''width''',
        '''latents''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase : Tuple = False

    @property
    def lowerCAmelCase ( self : Tuple):
        # time embedding input dim
        return 3_2

    @property
    def lowerCAmelCase ( self : List[Any]):
        # block out channels base
        return 3_2

    @property
    def lowerCAmelCase ( self : str):
        return self.time_input_dim

    @property
    def lowerCAmelCase ( self : List[str]):
        return self.time_input_dim * 4

    @property
    def lowerCAmelCase ( self : List[str]):
        # text sequence length placeholder
        return 1_0_0

    @property
    def lowerCAmelCase ( self : Dict):
        """Tiny UNet conditioned on image embeddings + hint."""
        torch.manual_seed(0)
        __lowerCamelCase : Optional[Any] = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        __lowerCamelCase : Union[str, Any] = UNetaDConditionModel(**SCREAMING_SNAKE_CASE__)
        return model

    @property
    def lowerCAmelCase ( self : Union[str, Any]):
        """Kwargs for a tiny VQ decoder (movq)."""
        return {
            "block_out_channels": [3_2, 3_2, 6_4, 6_4],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 1_2,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def lowerCAmelCase ( self : Optional[Any]):
        torch.manual_seed(0)
        __lowerCamelCase : int = VQModel(**self.dummy_movq_kwargs)
        return model

    def lowerCAmelCase ( self : Optional[Any]):
        """Assemble the pipeline components (unet + scheduler + movq)."""
        __lowerCamelCase : Tuple = self.dummy_unet
        __lowerCamelCase : List[Any] = self.dummy_movq
        __lowerCamelCase : str = DDIMScheduler(
            num_train_timesteps=1_0_0_0 ,beta_schedule='linear' ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=SCREAMING_SNAKE_CASE__ ,set_alpha_to_one=SCREAMING_SNAKE_CASE__ ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : Dict = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components

    def lowerCAmelCase ( self : int ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int]=0):
        """Build deterministic dummy pipeline inputs for a given device/seed."""
        __lowerCamelCase : str = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1)).to(
            SCREAMING_SNAKE_CASE__)
        # create hint
        __lowerCamelCase : Optional[int] = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(SCREAMING_SNAKE_CASE__)).to(SCREAMING_SNAKE_CASE__)
        if str(SCREAMING_SNAKE_CASE__).startswith('mps'):
            # mps does not support device-local generators
            __lowerCamelCase : int = torch.manual_seed(SCREAMING_SNAKE_CASE__)
        else:
            __lowerCamelCase : int = torch.Generator(device=SCREAMING_SNAKE_CASE__).manual_seed(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 6_4,
            'width': 6_4,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def lowerCAmelCase ( self : Optional[Any]):
        """Pipeline output matches a pinned reference slice on CPU."""
        __lowerCamelCase : Dict = 'cpu'
        __lowerCamelCase : Tuple = self.get_dummy_components()
        __lowerCamelCase : Any = self.pipeline_class(**SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[str] = pipe.to(SCREAMING_SNAKE_CASE__)
        pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Optional[Any] = pipe(**self.get_dummy_inputs(SCREAMING_SNAKE_CASE__))
        __lowerCamelCase : int = output.images
        __lowerCamelCase : Tuple = pipe(
            **self.get_dummy_inputs(SCREAMING_SNAKE_CASE__) ,return_dict=SCREAMING_SNAKE_CASE__ ,)[0]
        __lowerCamelCase : Dict = image[0, -3:, -3:, -1]
        __lowerCamelCase : int = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 6_4, 6_4, 3)
        __lowerCamelCase : List[str] = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1E-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A_ ( unittest.TestCase ):
    """Slow GPU integration test: depth-controlnet Kandinsky 2.2 vs. a
    reference image.

    NOTE(review): mangling artifacts — both methods are named
    ``lowerCAmelCase`` (the second shadows the first, so the intended
    tearDown never registers) and results are bound to ``__lowerCamelCase``,
    leaving names like ``hint``/``pipeline``/``image_emb`` unbound.
    """

    def lowerCAmelCase ( self : int):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase ( self : int):
        """Full prior + controlnet run compared against a pinned .npy output."""
        __lowerCamelCase : List[Any] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy')
        __lowerCamelCase : Union[str, Any] = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png')
        # HWC uint8 -> 1x3xHxW float in [0, 1]
        __lowerCamelCase : Tuple = torch.from_numpy(np.array(SCREAMING_SNAKE_CASE__)).float() / 255.0
        __lowerCamelCase : str = hint.permute(2 ,0 ,1).unsqueeze(0)
        __lowerCamelCase : Tuple = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa)
        pipe_prior.to(SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : Any = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa)
        __lowerCamelCase : int = pipeline.to(SCREAMING_SNAKE_CASE__)
        pipeline.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__)
        __lowerCamelCase : List[Any] = 'A robot, 4k photo'
        __lowerCamelCase : List[str] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase , __lowerCamelCase : Optional[Any] = pipe_prior(
            SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple()
        __lowerCamelCase : Optional[Any] = torch.Generator(device='cuda').manual_seed(0)
        __lowerCamelCase : Any = pipeline(
            image_embeds=SCREAMING_SNAKE_CASE__ ,negative_image_embeds=SCREAMING_SNAKE_CASE__ ,hint=SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,num_inference_steps=1_0_0 ,output_type='np' ,)
        __lowerCamelCase : List[Any] = output.images[0]
        assert image.shape == (5_1_2, 5_1_2, 3)
        assert_mean_pixel_difference(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
| 652 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    """Convert an OpenAI GPT TensorFlow checkpoint into a PyTorch checkpoint.

    Writes ``WEIGHTS_NAME`` (state dict) and ``CONFIG_NAME`` (JSON config)
    into ``pytorch_dump_folder_path``.

    Fixes vs. the previous revision (name-mangling damage): the signature
    declared three duplicate ``lowerCamelCase__`` parameters (a SyntaxError)
    while the body read unbound names, results were bound to
    ``__lowerCamelCase`` instead of real locals, and the ``Optional[Any]``
    return annotation referenced names this module never imports.

    :param openai_checkpoint_folder_path: path to the TF checkpoint folder.
    :param openai_config_file: config JSON path, or "" for default config.
    :param pytorch_dump_folder_path: output folder for the PyTorch files.
    """
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '/' + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, 'w', encoding='utf-8') as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # Fixes vs. the previous revision: the parser/args were bound to ``a`` so
    # ``parser``/``args`` were unbound, and the final call targeted an
    # undefined ``convert_openai_checkpoint_to_pytorch`` — the converter in
    # this module is (mangled-)named ``SCREAMING_SNAKE_CASE__``.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        """--openai_checkpoint_folder_path""",
        default=None,
        type=str,
        required=True,
        help="""Path to the TensorFlow checkpoint path.""",
    )
    parser.add_argument(
        """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--openai_config_file""",
        default="""""",
        type=str,
        help=(
            """An optional config json file corresponding to the pre-trained OpenAI model. \n"""
            """This specifies the model architecture."""
        ),
    )
    args = parser.parse_args()
    SCREAMING_SNAKE_CASE__(
        args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
    )
| 652 |
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class A_ :
    """Model tester for TF XGLM: builds tiny configs and dummy inputs.

    NOTE(review): mangling artifacts — the class attributes are all named
    ``_UpperCAmelCase`` (later assignments shadow earlier ones), ``__init__``
    declares duplicate ``SCREAMING_SNAKE_CASE__`` parameters (a SyntaxError)
    while reading unbound names, assignments target ``__lowerCamelCase``
    instead of ``self.<attr>``, and the tuple-annotation unpacking in the
    last method is not valid Python.  Restore the original names before use.
    """

    _UpperCAmelCase : int = XGLMConfig
    _UpperCAmelCase : List[Any] = {}
    _UpperCAmelCase : Tuple = '''gelu'''

    def __init__( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int]=1_4 ,SCREAMING_SNAKE_CASE__ : Tuple=7 ,SCREAMING_SNAKE_CASE__ : List[Any]=True ,SCREAMING_SNAKE_CASE__ : Any=True ,SCREAMING_SNAKE_CASE__ : Optional[Any]=True ,SCREAMING_SNAKE_CASE__ : Optional[int]=9_9 ,SCREAMING_SNAKE_CASE__ : str=3_2 ,SCREAMING_SNAKE_CASE__ : Tuple=2 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=4 ,SCREAMING_SNAKE_CASE__ : Tuple=3_7 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : Any=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=5_1_2 ,SCREAMING_SNAKE_CASE__ : str=0.02 ,):
        __lowerCamelCase : List[str] = parent
        __lowerCamelCase : List[str] = batch_size
        __lowerCamelCase : str = seq_length
        __lowerCamelCase : Optional[Any] = is_training
        __lowerCamelCase : Any = use_input_mask
        __lowerCamelCase : str = use_labels
        __lowerCamelCase : Any = vocab_size
        __lowerCamelCase : Dict = d_model
        __lowerCamelCase : int = num_hidden_layers
        __lowerCamelCase : List[Any] = num_attention_heads
        __lowerCamelCase : List[str] = ffn_dim
        __lowerCamelCase : Optional[Any] = activation_function
        __lowerCamelCase : Tuple = activation_dropout
        __lowerCamelCase : Union[str, Any] = attention_dropout
        __lowerCamelCase : List[str] = max_position_embeddings
        __lowerCamelCase : List[Any] = initializer_range
        # special token ids: pad=None? no — pad_token_id, bos, eos below
        __lowerCamelCase : Any = None
        __lowerCamelCase : List[str] = 0
        __lowerCamelCase : List[str] = 2
        __lowerCamelCase : Dict = 1

    def lowerCAmelCase ( self : Any):
        """Pretrained reference config."""
        return XGLMConfig.from_pretrained('facebook/xglm-564M')

    def lowerCAmelCase ( self : str):
        """Build (config, input_ids, input_mask, head_mask) dummy tensors."""
        __lowerCamelCase : Any = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size) ,clip_value_min=0 ,clip_value_max=3)
        __lowerCamelCase : Dict = None
        if self.use_input_mask:
            __lowerCamelCase : int = random_attention_mask([self.batch_size, self.seq_length])
        __lowerCamelCase : int = self.get_config()
        __lowerCamelCase : Union[str, Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] ,2)
        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )

    def lowerCAmelCase ( self : List[Any]):
        """Tiny XGLMConfig built from this tester's hyperparameters."""
        return XGLMConfig(
            vocab_size=self.vocab_size ,d_model=self.hidden_size ,num_layers=self.num_hidden_layers ,attention_heads=self.num_attention_heads ,ffn_dim=self.ffn_dim ,activation_function=self.activation_function ,activation_dropout=self.activation_dropout ,attention_dropout=self.attention_dropout ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,use_cache=SCREAMING_SNAKE_CASE__ ,bos_token_id=self.bos_token_id ,eos_token_id=self.eos_token_id ,pad_token_id=self.pad_token_id ,return_dict=SCREAMING_SNAKE_CASE__ ,)

    def lowerCAmelCase ( self : int):
        """Prepare (config, inputs_dict) for the common test mixin.

        NOTE(review): the parenthesized tuple target below carries an
        annotation (``(...) : Any = ...``), which is invalid syntax; it was
        presumably ``(config, input_ids, input_mask, head_mask) = ...``.
        """
        __lowerCamelCase : int = self.prepare_config_and_inputs()
        (
            (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) , (
                __lowerCamelCase
            ) ,
        ) : Any = config_and_inputs
        __lowerCamelCase : str = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model-level tests for the TF XGLM implementations.

    NOTE(review): base classes and attribute names reconstructed from the
    imports at the top of the file and the common-test conventions; the
    obfuscated source reused one name for the three boolean flags below.
    """

    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False

    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
    """Slow integration tests that run the public facebook/xglm-564M checkpoint."""

    @slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        """Greedy generation from a fixed prompt must reproduce the pinned token ids."""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)

    @slow
    def test_xglm_sample(self):
        """Seeded sampling must be reproducible and match the pinned continuation."""
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)

    @slow
    def test_batch_generation(self):
        """Left-padded batched generation must match per-sentence generation."""
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 652 | 1 |
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy-import structure: maps submodule name -> public names it provides.
# NOTE(review): the obfuscated source bound this dict (and, later, the modeling
# name list) to the same throwaway name `a` while passing an undefined
# `_import_structure` to _LazyModule — restored here.
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: the modeling submodule is simply not exposed.
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
# Kandinsky pipeline package init: falls back to dummy placeholder objects when
# either `transformers` or `torch` is unavailable, so importing the package
# never fails at import time.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Placeholders that raise a helpful error on first use instead of here.
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    # NOTE(review): `KandinskyImgaImgPipeline` / `pipeline_kandinsky_imgaimg` look
    # like mangled `Img2Img` names — confirm against the actual module files.
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
| 652 | 1 |
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module-level logger; the helper functions below report YAML errors through it.
# (The obfuscated source bound it to `a`, leaving `logger` undefined.)
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    """Load the original MobileViTV2 YAML config as a flat argparse.Namespace.

    Nested YAML mappings are flattened into dot-separated attribute names
    (e.g. ``model.classification.name``) so callers can read them via getattr.
    YAML parse errors are logged and an empty namespace is returned.
    """
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        # Recursively flatten nested mappings into {"a.b.c": value} form.
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    """Build a MobileViTVaConfig for *task_name* from the original YAML config.

    Args:
        task_name: one of the supported task strings (imagenet1k_*, imagenet21k_to_1k_*,
            ade20k_*, voc_*); decides label count, input size and id2label file.
        orig_cfg_file: path to the original training YAML, read via load_orig_config_file.
    """
    config = MobileViTVaConfig()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]`` in place (raises KeyError if *old* is absent)."""
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    """Return (old_key, new_key) pairs mapping original MobileViTV2 weight names
    to the HuggingFace naming scheme.

    Args:
        state_dict: mapping whose keys are the original checkpoint weight names.
        base_model: when True, omit the ``mobilevitv2.`` prefix (bare backbone).
    """
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        # Strip the leading "encoder." scope; all substring checks below test the
        # ORIGINAL key `k`, while replacements chain on `k_new`.
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")
        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            # Stages 3/4/5 contain 2/4/3 transformer blocks respectively; the entry
            # after the last block index is the final layernorm.
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            # `j` intentionally holds the last block index here (loop variable reuse).
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Drop auxiliary-head weights (``seg_head.aux_head.*``) from *state_dict* in place."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        # default=None: tolerate a key vanishing between the scan and the pop
        state_dict.pop(k, None)
def prepare_img():
    """Download the standard COCO cats test image used for output sanity checks."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    """Convert an original MobileViTV2 checkpoint to the HuggingFace format.

    Args:
        task_name: task identifier (see the argparse choices below).
        checkpoint_path: path to the original ``.pt`` state dict.
        orig_config_path: path to the original training YAML config.
        pytorch_dump_folder_path: output directory for the converted model + processor.
    """
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # The obfuscated source bound the parser and the parsed args to the same
    # throwaway name `a` while calling `parser.add_argument` / reading `args.*`;
    # the real names are restored here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
        """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_mobilevitva_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
| 652 |
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): the obfuscation rebound every docstring constant below to the
# single name `a` (logger, _CONFIG_FOR_DOC-style strings, expected output shape,
# checkpoint names, pretrained archive list) — only the FINAL binding survives
# at runtime. Restore the original constant names before relying on any of them.
a =logging.get_logger(__name__)
# General docstring
a ="""RegNetConfig"""
# Base docstring
a ="""facebook/regnet-y-040"""
a =[1, 1088, 7, 7]
# Image classification docstring
a ="""facebook/regnet-y-040"""
a ="""tabby, tabby cat"""
a =[
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv -> BatchNorm -> activation block with explicit "same"-style zero padding."""

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ):
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding="VALID",
            groups=groups,
            use_bias=False,
            name="convolution",
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")
        # ACTaFN is this file's activation lookup table (obfuscated ACT2FN import).
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: validates the channel count and applies one strided convolution."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name="embedder",
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration.")
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch-norm projection used on the residual branch when the
    spatial size or channel count changes."""

    def __init__(self, out_channels: int, stride: int = 2, **kwargs):
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name="convolution"
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name="normalization")

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation: global-pool, bottleneck 1x1 convs, channel-wise rescale."""

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs):
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation="relu", name="attention.0"),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation="sigmoid", name="attention.2"),
        ]

    def call(self, hidden_state):
        # [batch_size, h, w, num_channels] -> [batch_size, 1, 1, num_channels]
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet "X" residual block: 1x1 -> grouped 3x3 -> 1x1 convs plus a shortcut."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            # No activation on the last conv; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.2"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetYLayer(tf.keras.layers.Layer):
    """RegNet "Y" residual block: an X block with a squeeze-and-excitation stage."""

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs):
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"
            ),
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            # No activation on the last conv; it is applied after the residual add.
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetStage(tf.keras.layers.Layer):
    """A stack of `depth` X- or Y-layers; only the first one changes resolution/channels."""

    def __init__(
        self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2, **kwargs
    ):
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def call(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class TFRegNetEncoder(tf.keras.layers.Layer):
    """Runs the input through all RegNet stages, optionally collecting hidden states."""

    def __init__(self, config: RegNetConfig, **kwargs):
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
                name="stages.0",
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def call(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class TFRegNetMainLayer(tf.keras.layers.Layer):
    """Embeddings + encoder + pooler; outputs are transposed back to NCHW."""

    # Required by @keras_serializable.
    config_class = RegNetConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def call(
        self,
        pixel_values: tf.Tensor,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values, training=training)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))

        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states,
        )
class A_ ( SCREAMING_SNAKE_CASE ):
    """RegNet pretrained-model base: config class, model prefix and input spec.

    NOTE(review): the three class attributes below all share one mangled name,
    so only the last assignment survives; the values suggest they were
    originally `config_class`, `base_model_prefix` and `main_input_name` —
    confirm upstream before renaming.
    """

    _UpperCAmelCase : Any = RegNetConfig
    _UpperCAmelCase : Optional[int] = '''regnet'''
    _UpperCAmelCase : List[Any] = '''pixel_values'''

    @property
    def lowerCAmelCase ( self):
        # Serving signature: NCHW float32 images at 224x224.
        # (fixed `tf.floataa`, which does not exist, to `tf.float32`)
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 2_2_4, 2_2_4) ,dtype=tf.float32)}
a =r"""
Parameters:
This model is a Tensorflow
[tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
behavior.
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
a =r"""
Args:
pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConveNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE ):
    """Bare TF RegNet model returning encoder features (no task head)."""

    def __init__( self ,config : RegNetConfig ,*inputs ,**kwargs):
        super().__init__(config ,*inputs ,**kwargs)
        self.regnet = TFRegNetMainLayer(config ,name='regnet')

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,modality='vision' ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
    def lowerCAmelCase ( self ,pixel_values : tf.Tensor ,output_hidden_states : Optional[bool] = None ,return_dict : Optional[bool] = None ,training : bool = False ,):
        """Forward pass delegating to the main layer.

        NOTE(review): parameter names restored from the body's reads (the
        original duplicated one placeholder for all of them — a SyntaxError).
        The decorator arguments left as `SCREAMING_SNAKE_CASE__` are mangled
        module-level references (presumably the inputs docstring and the output
        class) — confirm upstream.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training ,)
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state ,pooler_output=outputs.pooler_output ,hidden_states=outputs.hidden_states ,)
@add_start_docstrings(
    '''
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    ''' , SCREAMING_SNAKE_CASE , )
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
    """TF RegNet with a linear image-classification head on the pooled features."""

    def __init__( self ,config : RegNetConfig ,*inputs ,**kwargs):
        super().__init__(config ,*inputs ,**kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config ,name='regnet')
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels ,name='classifier.1') if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE__)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=SCREAMING_SNAKE_CASE__ ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
    def lowerCAmelCase ( self ,pixel_values : tf.Tensor = None ,labels : tf.Tensor = None ,output_hidden_states : Optional[bool] = None ,return_dict : Optional[bool] = None ,training : bool = False ,):
        """Classification forward pass; computes the loss when `labels` is given.

        NOTE(review): parameter names restored from the body's reads (`labels`
        at the loss computation) — the original duplicated one placeholder name
        for all parameters, a SyntaxError.
        """
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values ,output_hidden_states=output_hidden_states ,return_dict=return_dict ,training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened)
        loss = None if labels is None else self.hf_compute_loss(labels=labels ,logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss ,logits=logits ,hidden_states=outputs.hidden_states)
| 652 | 1 |
from __future__ import annotations
from math import pow, sqrt
def SCREAMING_SNAKE_CASE__ ( resistance , reactance , impedance ) -> dict[str, float]:
    """Solve the electrical impedance triangle for the one quantity given as 0.

    Exactly one of the three arguments must be 0; that quantity is computed
    from the other two via ``impedance**2 = resistance**2 + reactance**2`` and
    returned in a single-entry dict keyed by its name.

    Raises:
        ValueError: if the number of zero arguments is not exactly one.

    NOTE(review): the original declared all three parameters with one
    duplicated placeholder name (a SyntaxError); names restored from the
    returned dict keys.
    """
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 652 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
a =object()
# For specifying empty leaf dict `{}`
a =object()
def SCREAMING_SNAKE_CASE__ ( qs , ks ) -> bool:
    """Return True if the regex sequence `qs` matches a contiguous window of `ks`.

    Each pattern in `qs` is anchored with ``$`` so it must match a whole key
    segment; every window of `ks` with the same length as `qs` is tried.

    NOTE(review): the original duplicated one placeholder name for both
    parameters (a SyntaxError) and matched the wrong operand inside the zip;
    names and the `x.match(y)` pairing restored from the zip structure.
    """
    compiled = tuple(re.compile(x + '$' ) for x in qs)
    for i in range(len(ks ) - len(qs ) + 1 ):
        matches = [x.match(y ) for x, y in zip(compiled , ks[i:] )]
        if matches and all(matches ):
            return True
    return False
def SCREAMING_SNAKE_CASE__ ( rules ):
    """Build a ``replace(key, val)`` function from (pattern, replacement) rules.

    The returned function gives back the replacement of the first rule whose
    pattern matches `key`, or `val` unchanged when no rule matches.

    NOTE(review): inner parameter names restored (the original duplicated one
    placeholder, a SyntaxError); the bogus `-> Tuple` annotation (typing was
    not imported) was dropped. `_match` is read here but the matcher above is
    bound to a mangled name — confirm upstream.
    """
    def replace(key , val ):
        for rule, replacement in rules:
            if _match(rule , key ):
                return replacement
        return val

    return replace
def SCREAMING_SNAKE_CASE__ ( ) -> list:
    """Partition rules mapping GPT-style parameter paths to PartitionSpecs.

    NOTE(review): the second axis of several specs referenced an undefined
    placeholder name; replaced with `None` (replicated axis), the conventional
    value in upstream partitioning scripts — confirm against the original.
    """
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P('mp' , None )),
        (("transformer", "wte", "embedding"), P('mp' , None )),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None , 'mp' )),
        (("attention", "out_proj", "kernel"), P('mp' , None )),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None , 'mp' )),
        (("mlp", "c_fc", "bias"), P('mp' )),
        (("mlp", "c_proj", "kernel"), P('mp' , None )),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]
def SCREAMING_SNAKE_CASE__ ( in_dict ):
    """Build a frozen pytree of PartitionSpecs covering every leaf of `in_dict`.

    Every flattened key starts as the `_unmatched` sentinel and is then mapped
    through the partition rules; the assert guarantees full coverage.

    NOTE(review): the original's assignment targets were mangled while later
    lines read `initd`/`result` (a NameError); targets restored from those
    reads. The helper names read here (`_get_partition_rules`,
    `_replacement_rules`, `_unmatched`) are bound to mangled names above —
    confirm upstream. The bogus `-> str` annotation was dropped.
    """
    rules = _get_partition_rules()
    replace = _replacement_rules(rules )
    # Start every flattened key as unmatched, then apply the rules.
    initd = {k: _unmatched for k in flatten_dict(in_dict )}
    result = {k: replace(k , v ) for k, v in initd.items()}
    assert _unmatched not in result.values(), "Incomplete partition spec."
    return freeze(unflatten_dict(result ) )
| 652 | 1 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
# Module-wide mark: these inspection tests hit the Hub and are integration tests.
a =pytest.mark.integration
@pytest.mark.parametrize('path' , ['paws', 'csv'] )
def SCREAMING_SNAKE_CASE__ ( path , tmp_path ):
    """Check that `inspect_dataset` copies the dataset script into the target dir.

    NOTE(review): both parameters shared one duplicated placeholder name (a
    SyntaxError). The first is the parametrized `path`; the second is written
    to and listed, so it is presumed to be pytest's `tmp_path` fixture —
    confirm. The bogus `-> int` annotation was dropped.
    """
    inspect_dataset(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('ignore:inspect_metric is deprecated:FutureWarning' )
@pytest.mark.filterwarnings('ignore:metric_module_factory is deprecated:FutureWarning' )
@pytest.mark.parametrize('path' , ['accuracy'] )
def SCREAMING_SNAKE_CASE__ ( path , tmp_path ):
    """Check that `inspect_metric` copies the metric script into the target dir.

    NOTE(review): parameter names restored as in the dataset variant above —
    the original duplicated one placeholder name (a SyntaxError).
    """
    inspect_metric(path , tmp_path )
    script_name = path + '.py'
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
    'path, config_name, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , config_name , expected_splits ):
    """`get_dataset_config_info` returns the requested config with its splits.

    NOTE(review): parameter names restored from the parametrize argnames
    string — the original duplicated one placeholder name (a SyntaxError).
    """
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , config_name , expected_exception ):
    """A missing/ambiguous config name raises the expected exception.

    NOTE(review): parameter names restored from the parametrize argnames string.
    """
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
    'path, expected' , [
        ('squad', 'plain_text'),
        ('acronym_identification', 'default'),
        ('lhoestq/squad', 'plain_text'),
        ('lhoestq/test', 'default'),
        ('lhoestq/demo1', 'lhoestq--demo1'),
        ('dalle-mini/wit', 'dalle-mini--wit'),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , expected ):
    """`get_dataset_config_names` includes the expected config for each path.

    NOTE(review): parameter names restored from the parametrize argnames string.
    """
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
    'path, expected_configs, expected_splits_in_first_config' , [
        ('squad', ['plain_text'], ['train', 'validation']),
        ('dalle-mini/wit', ['dalle-mini--wit'], ['train']),
        ('paws', ['labeled_final', 'labeled_swap', 'unlabeled_final'], ['train', 'test', 'validation']),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , expected_configs , expected_splits_in_first_config ):
    """`get_dataset_infos` lists every config; the first config has the right splits.

    NOTE(review): parameter names restored from the parametrize argnames
    string; local names restored from the body's reads (`infos`,
    `expected_config`, `info`).
    """
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
    'path, expected_config, expected_splits' , [
        ('squad', 'plain_text', ['train', 'validation']),
        ('dalle-mini/wit', 'dalle-mini--wit', ['train']),
        ('paws', 'labeled_final', ['train', 'test', 'validation']),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , expected_config , expected_splits ):
    """A specific config appears in `get_dataset_infos` with the expected splits.

    NOTE(review): parameter names restored from the parametrize argnames string.
    """
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
    'path, config_name, expected_exception' , [
        ('paws', None, ValueError),
    ] , )
def SCREAMING_SNAKE_CASE__ ( path , config_name , expected_exception ):
    """`get_dataset_split_names` raises when the config name is not resolvable.

    NOTE(review): parameter names restored from the parametrize argnames string.
    """
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 652 |
import math
def SCREAMING_SNAKE_CASE__ ( n ) -> list:
    """Return all primes strictly below `n`, via an odd-only Sieve of Eratosthenes.

    Only odd indices are consulted when collecting primes (2 is appended
    explicitly), so even composites are never marked.

    NOTE(review): the original's assignment targets were mangled while later
    lines read `is_prime`, `index` and `primes` (a NameError); targets
    restored from those reads. A guard for n < 3 was added — the original
    indexed `is_prime[2]` unconditionally and crashed for n <= 2.
    """
    if n < 3:
        return []
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3 , int(n**0.5 + 1 ) , 2 ):
        # Mark every multiple of i from 2*i upward as composite.
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3 , n , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes
def SCREAMING_SNAKE_CASE__ ( limit = 9_9_9_9_6_6_6_6_3_3_3_3 ) -> int:
    """Sum the numbers up to `limit` divisible by exactly one of two consecutive
    prime squares' roots (Project-Euler-style "semidivisible" accumulation).

    For each pair of consecutive primes (p, q) with p**2 <= limit, multiples of
    p and of q inside (p**2, q**2] are accumulated; numbers divisible by both
    were added by both scans and are subtracted twice in the last loop.

    NOTE(review): every assignment target in the original was mangled while
    later lines read `primes`, `last_prime`, `matches_sum`, `current`, etc.;
    targets restored from those reads. `prime_sieve` is read here but the
    sieve above is bound to a mangled name — confirm upstream.
    """
    primes_upper_bound = math.floor(math.sqrt(limit ) ) + 1_0_0
    primes = prime_sieve(primes_upper_bound )
    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]
    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]
        lower_bound = last_prime**2
        upper_bound = next_prime**2
        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime
        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime
        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime
        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime
        # Setup for next pair
        last_prime = next_prime
        prime_index += 1
    return matches_sum
if __name__ == "__main__":
print(solution())
| 652 | 1 |
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
# One non-default value for every common `PretrainedConfig` kwarg; the
# completeness test below compares this against a fresh config's attributes.
# NOTE(review): the original bound this dict to the throwaway name `a`, while
# the test methods below read `config_common_kwargs` (a NameError); the real
# name is restored and the old binding kept as an alias.
config_common_kwargs = {
    "return_dict": False,
    "output_hidden_states": True,
    "output_attentions": True,
    "torchscript": True,
    "torch_dtype": "float16",
    "use_bfloat16": True,
    "tf_legacy_loss": True,
    "pruned_heads": {"a": 1},
    "tie_word_embeddings": False,
    "is_decoder": True,
    "cross_attention_hidden_size": 128,
    "add_cross_attention": True,
    "tie_encoder_decoder": True,
    "max_length": 50,
    "min_length": 3,
    "do_sample": True,
    "early_stopping": True,
    "num_beams": 3,
    "num_beam_groups": 3,
    "diversity_penalty": 0.5,
    "temperature": 2.0,
    "top_k": 10,
    "top_p": 0.7,
    "typical_p": 0.2,
    "repetition_penalty": 0.8,
    "length_penalty": 0.8,
    "no_repeat_ngram_size": 5,
    "encoder_no_repeat_ngram_size": 5,
    "bad_words_ids": [1, 2, 3],
    "num_return_sequences": 3,
    "chunk_size_feed_forward": 5,
    "output_scores": True,
    "return_dict_in_generate": True,
    "forced_bos_token_id": 2,
    "forced_eos_token_id": 3,
    "remove_invalid_values": True,
    "architectures": ["BertModel"],
    "finetuning_task": "translation",
    "id2label": {0: "label"},
    "label2id": {"label": "0"},
    "tokenizer_class": "BertTokenizerFast",
    "prefix": "prefix",
    "bos_token_id": 6,
    "pad_token_id": 7,
    "eos_token_id": 8,
    "sep_token_id": 9,
    "decoder_start_token_id": 10,
    "exponential_decay_length_penalty": (5, 1.01),
    "suppress_tokens": [0, 1],
    "begin_suppress_tokens": 2,
    "task_specific_params": {"translation": "some_params"},
    "problem_type": "regression",
}
a = config_common_kwargs
@is_staging_test
class A_ ( unittest.TestCase ):
    """Staging-hub round-trip tests for pushing/pulling configs.

    NOTE(review): all five methods originally shared one mangled name, so only
    the last definition survived on the class; the `setUpClass`/`tearDownClass`
    hooks and `test_*` names are restored from each body's behavior — confirm
    against upstream.
    """

    @classmethod
    def setUpClass(cls):
        # Cache the staging token and log the session in.
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup of the repos the tests may have created.
        try:
            delete_repo(token=cls._token ,repo_id='test-config')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='valid_org/test-config-org')
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token ,repo_id='test-dynamic-config')
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = BertConfig(
            vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7)
        config.push_to_hub('test-config' ,use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k))
        # Reset repo
        delete_repo(token=self._token ,repo_id='test-config')
        # Push to hub via save_pretrained (push_to_hub=True presumed — the
        # original passed a mangled placeholder here; confirm upstream).
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir ,repo_id='test-config' ,push_to_hub=True ,use_auth_token=self._token)
        new_config = BertConfig.from_pretrained(F"{USER}/test-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k))

    def test_push_to_hub_in_organization(self):
        config = BertConfig(
            vocab_size=9_9 ,hidden_size=3_2 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=3_7)
        config.push_to_hub('valid_org/test-config-org' ,use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k))
        # Reset repo
        delete_repo(token=self._token ,repo_id='valid_org/test-config-org')
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir ,repo_id='valid_org/test-config-org' ,push_to_hub=True ,use_auth_token=self._token)
        new_config = BertConfig.from_pretrained('valid_org/test-config-org')
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k))

    def test_push_to_hub_dynamic_config(self):
        CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=4_2)
        config.push_to_hub('test-dynamic-config' ,use_auth_token=self._token)
        # This has added the proper auto_map field to the config
        self.assertDictEqual(config.auto_map ,{'AutoConfig': 'custom_configuration.CustomConfig'})
        # trust_remote_code=True presumed (mangled placeholder in the original);
        # required to load a config class from a dynamic module.
        new_config = AutoConfig.from_pretrained(F"{USER}/test-dynamic-config" ,trust_remote_code=True)
        # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
        self.assertEqual(new_config.__class__.__name__ ,'CustomConfig')
        self.assertEqual(new_config.attribute ,4_2)
class A_ ( unittest.TestCase ):
    """Unit tests for `PretrainedConfig` loading, serialization and versioning.

    NOTE(review): in the original every method carried the same mangled name,
    so unittest would only ever discover the last one; descriptive `test_*`
    names are restored from each body's behavior — confirm against upstream.
    """

    def test_config_from_string(self):
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + 'foo'  # str
        c.update_from_string(
            F"n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}")
        self.assertEqual(n_embd ,c.n_embd ,'mismatch for key: n_embd')
        self.assertEqual(resid_pdrop ,c.resid_pdrop ,'mismatch for key: resid_pdrop')
        self.assertEqual(scale_attn_weights ,c.scale_attn_weights ,'mismatch for key: scale_attn_weights')
        self.assertEqual(summary_type ,c.summary_type ,'mismatch for key: summary_type')

    def test_config_common_kwargs_is_complete(self):
        # `config_common_kwargs` is expected to be the module-level dict defined
        # above (currently bound to `a` by the obfuscation) — confirm.
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys ,['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'])
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config ,key)]
        if len(keys_with_defaults) > 0:
            raise ValueError(
                'The following keys are set with the default values in'
                ' `test_configuration_common.config_common_kwargs` pick another value for them:'
                F" {', '.join(keys_with_defaults)}.")

    def test_from_pretrained_subfolder(self):
        # OSError presumed (the original referenced a mangled placeholder here);
        # loading without `subfolder` should fail — confirm upstream.
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder')
        config = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' ,subfolder='bert')
        self.assertIsNotNone(config)

    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 5_0_0
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('requests.Session.request' ,return_value=response_mock) as mock_head:
            _ = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = BertConfig.from_pretrained(
            'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json')

    def test_local_versioning(self):
        configuration = AutoConfig.from_pretrained('bert-base-cased')
        # Restrict loading to a versioned config file name (attribute name
        # presumed from the versioned-file behavior tested below — confirm).
        configuration.configuration_files = ['config.4.0.0.json']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir)
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() ,open(os.path.join(tmp_dir ,'config.4.0.0.json') ,'w'))
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size ,2)
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['config.42.0.0.json']
            configuration.hidden_size = 7_6_8
            configuration.save_pretrained(tmp_dir)
            shutil.move(os.path.join(tmp_dir ,'config.4.0.0.json') ,os.path.join(tmp_dir ,'config.42.0.0.json'))
            new_configuration = AutoConfig.from_pretrained(tmp_dir)
            self.assertEqual(new_configuration.hidden_size ,7_6_8)

    def test_repo_versioning_before(self):
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        repo = 'hf-internal-testing/test-two-configs'

        import transformers as new_transformers

        # Pretend to be v4.0.0 so the v4 config file is selected (assignment
        # target presumed from the version-gating behavior — confirm upstream).
        new_transformers.configuration_utils.__version__ = 'v4.0.0'
        new_configuration, kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo ,return_unused_kwargs=True)
        self.assertEqual(new_configuration.hidden_size ,2)
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs ,{})

        # Testing an older version by monkey-patching the version in the module it's used.
        import transformers as old_transformers

        old_transformers.configuration_utils.__version__ = 'v3.0.0'
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo)
        self.assertEqual(old_configuration.hidden_size ,7_6_8)
| 652 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class A_ ( SCREAMING_SNAKE_CASE ):
    """Output of the predictor step: previous sample and its pre-noise mean.

    NOTE(review): `step_pred` below constructs `SdeVeOutput(prev_sample=...,
    prev_sample_mean=...)`; the two fields here previously shared one mangled
    name (so the second shadowed the first, breaking keyword construction).
    Field names restored from that call site — confirm the class's real name.
    """

    # x_{t-1}: mean plus the diffusion noise term.
    prev_sample: torch.FloatTensor
    # Mean of the reverse-SDE transition, before noise is added.
    prev_sample_mean: torch.FloatTensor
class A_ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Dict = 1
@register_to_config
def __init__( self ,num_train_timesteps : int = 2_0_0_0 ,snr : float = 0.15 ,sigma_min : float = 0.01 ,sigma_max : float = 1348.0 ,sampling_eps : float = 1E-5 ,correct_steps : int = 1 ,):
    """Configure the SDE-VE scheduler and precompute the default sigma schedule.

    NOTE(review): all six parameters shared one duplicated placeholder name (a
    SyntaxError). `snr`, `sigma_min`, `sigma_max` and `sampling_eps` are
    grounded in `self.config.<name>` reads elsewhere in this class; the first
    and last names follow the scheduler API convention — confirm upstream.
    """
    # standard deviation of the initial noise distribution
    # (attribute name presumed from the comment above — confirm upstream)
    self.init_noise_sigma = sigma_max
    # setable values; `set_sigmas` lazily calls `set_timesteps` while this is None
    self.timesteps = None
    self.set_sigmas(num_train_timesteps ,sigma_min ,sigma_max ,sampling_eps)
def lowerCAmelCase ( self ,sample : torch.FloatTensor ,timestep : Optional[int] = None) -> torch.FloatTensor:
    """Identity scaling: the SDE-VE scheduler needs no input rescaling.

    NOTE(review): both parameters shared one duplicated placeholder name (a
    SyntaxError); `sample` is grounded in the return, `timestep` follows the
    scheduler interface.
    """
    return sample
def lowerCAmelCase ( self ,num_inference_steps : int ,sampling_eps : float = None ,device : Union[str, torch.device] = None):
    """Create the continuous timestep schedule, from 1 down to `sampling_eps`.

    NOTE(review): the three parameters shared one duplicated placeholder name
    (a SyntaxError); names grounded in the config fallback and the linspace
    arguments. The assignment target `self.timesteps` is restored from reads
    elsewhere in this class.
    """
    sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
    # Continuous time runs backwards from 1 toward ~0.
    self.timesteps = torch.linspace(1 ,sampling_eps ,num_inference_steps ,device=device)
def lowerCAmelCase ( self ,num_inference_steps : int ,sigma_min : float = None ,sigma_max : float = None ,sampling_eps : float = None):
    """Build the geometric sigma (noise-scale) schedule for the reverse SDE.

    NOTE(review): the four parameters shared one duplicated placeholder name (a
    SyntaxError); names grounded in the `self.config.<name>` fallbacks. The
    original computed the same sigma expression twice with mangled targets;
    `self.sigmas`/`self.discrete_sigmas` restored from reads elsewhere in this
    class — confirm the double assignment against upstream.
    """
    sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
    sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
    sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
    if self.timesteps is None:
        self.set_timesteps(num_inference_steps ,sampling_eps)
    self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
    self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min) ,math.log(sigma_max) ,num_inference_steps))
    self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])
def lowerCAmelCase ( self ,timesteps ,t):
    """Sigma of the previous discrete step, or zero where `timesteps` is 0.

    NOTE(review): both parameters shared one duplicated placeholder name (a
    SyntaxError); names restored from the body's reads.
    """
    return torch.where(
        timesteps == 0 ,torch.zeros_like(t.to(timesteps.device)) ,self.discrete_sigmas[timesteps - 1].to(timesteps.device) ,)
def lowerCAmelCase ( self ,model_output : torch.FloatTensor ,timestep : int ,sample : torch.FloatTensor ,generator : Optional[torch.Generator] = None ,return_dict : bool = True ,):
    """One predictor step of the reverse SDE (ancestral-sampling analogue).

    NOTE(review): the five parameters shared one duplicated placeholder name (a
    SyntaxError); names and local targets restored from the body's reads
    (`drift`, `diffusion`, `prev_sample_mean`, ...). `randn_tensor` and
    `SdeVeOutput` are module-level names from the original file.
    """
    if self.timesteps is None:
        raise ValueError(
            '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
    timestep = timestep * torch.ones(
        sample.shape[0] ,device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
    timesteps = (timestep * (len(self.timesteps) - 1)).long()
    # mps requires indices to be in the same device, so we use cpu as is the default with cuda
    timesteps = timesteps.to(self.discrete_sigmas.device)
    sigma = self.discrete_sigmas[timesteps].to(sample.device)
    adjacent_sigma = self.get_adjacent_sigma(timesteps ,timestep).to(sample.device)
    drift = torch.zeros_like(sample)
    diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
    # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
    # also equation 47 shows the analog from SDE models to ancestral sampling methods
    diffusion = diffusion.flatten()
    while len(diffusion.shape) < len(sample.shape):
        diffusion = diffusion.unsqueeze(-1)
    drift = drift - diffusion**2 * model_output
    # equation 6: sample noise for the diffusion term of
    noise = randn_tensor(
        sample.shape ,layout=sample.layout ,generator=generator ,device=sample.device ,dtype=sample.dtype)
    prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
    # TODO is the variable diffusion the correct scaling term for the noise?
    prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
    if not return_dict:
        return (prev_sample, prev_sample_mean)
    return SdeVeOutput(prev_sample=prev_sample ,prev_sample_mean=prev_sample_mean)
def lowerCAmelCase(
    self: int,
    model_output: torch.FloatTensor,
    sample: torch.FloatTensor,
    generator: Optional[torch.Generator] = None,
    return_dict: bool = True,
):
    """Corrector step: one Langevin-style correction of ``sample`` whose step
    size is derived from the configured signal-to-noise ratio.

    Raises:
        ValueError: if ``set_timesteps`` has not been called yet.

    NOTE(review): restored the distinct parameter/local names that obfuscation
    collapsed; logic is unchanged.
    """
    if self.timesteps is None:
        raise ValueError(
            '`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler')
    # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
    # sample noise for correction
    noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
    # compute step size from the model_output, the noise, and the snr
    grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
    noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
    step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
    step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
    # self.repeat_scalar(step_size, sample.shape[0])
    # compute corrected sample: model_output term and noise term
    step_size = step_size.flatten()
    while len(step_size.shape) < len(sample.shape):
        step_size = step_size.unsqueeze(-1)
    prev_sample_mean = sample + step_size * model_output
    prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
    if not return_dict:
        return (prev_sample,)
    return SchedulerOutput(prev_sample=prev_sample)
def lowerCAmelCase(
    self: List[Any],
    original_samples: torch.FloatTensor,
    noise: torch.FloatTensor,
    timesteps: torch.FloatTensor,
):
    """Diffuse ``original_samples`` to the noise level indexed by ``timesteps``.

    ``self.discrete_sigmas[timesteps]`` selects one sigma per batch element;
    ``noise`` (or fresh Gaussian noise when it is None) is scaled by it and
    added to the clean samples.

    NOTE(review): restored the distinct parameter/local names that obfuscation
    collapsed; logic is unchanged. Assumes 4-D (NCHW-like) samples — the sigma
    broadcast uses three trailing singleton axes.
    """
    # Make sure sigmas and timesteps have the same device and dtype as original_samples
    timesteps = timesteps.to(original_samples.device)
    sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
    noise = (
        noise * sigmas[:, None, None, None]
        if noise is not None
        else torch.randn_like(original_samples) * sigmas[:, None, None, None]
    )
    noisy_samples = noise + original_samples
    return noisy_samples
def __len__( self : Optional[int]):
    """Length of the scheduler == configured number of training timesteps."""
    return self.config.num_train_timesteps
| 652 | 1 |
from jiwer import compute_measures
import datasets
# Citation / description / usage strings consumed by the Metric class below.
# NOTE(review): the original bound all three to the same name `a`, so the
# `_DESCRIPTION` / `_KWARGS_DESCRIPTION` references in the decorator below were
# NameErrors; restored the conventional constant names.
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
    """Word Error Rate (WER) metric backed by ``jiwer.compute_measures``."""

    def lowerCAmelCase ( self : Union[str, Any]):
        """Describe the metric: string predictions/references in, WER float out."""
        return datasets.MetricInfo(
            description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
                {
                    'predictions': datasets.Value('string' ,id='sequence'),
                    'references': datasets.Value('string' ,id='sequence'),
                }) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
                'https://en.wikipedia.org/wiki/Word_error_rate',
            ] ,)

    def lowerCAmelCase ( self : int ,predictions : Tuple=None ,references : List[Any]=None ,concatenate_texts : Any=False):
        """Compute WER over paired predictions/references.

        When ``concatenate_texts`` is True, jiwer scores the whole corpus in
        one call; otherwise error counts are accumulated pair by pair.

        NOTE(review): restored distinct parameter names — the obfuscated
        duplicate parameter names were a SyntaxError and the body read
        undefined names.
        """
        if concatenate_texts:
            return compute_measures(references ,predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions ,references):
                # jiwer expects (truth, hypothesis) argument order.
                measures = compute_measures(reference ,prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 652 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
# Emit bare messages (no level/name prefix) at INFO level for the PCA/LDA helpers below.
logging.basicConfig(level=logging.INFO, format="""%(message)s""")
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> np.ndarray:
    """Reshape a 1-D array into a column vector of shape (n, 1).

    NOTE(review): the original body referenced ``input_array`` while the
    parameter was named ``lowerCamelCase__`` — a guaranteed NameError; fixed
    to use the actual parameter.
    """
    return lowerCamelCase__.reshape((lowerCamelCase__.size, 1))
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Average within-class scatter matrix.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class labels in ``range(classes)``.
        classes: number of classes.

    Returns:
        (n_features, n_features) matrix: sum over classes of the scatter of the
        class-centered data, divided by the total sample count.

    NOTE(review): the obfuscated original had three identically-named
    parameters (a SyntaxError) and referenced undefined locals; reconstructed
    from the body's evident intent with distinct names.
    """
    covariance_sum = None
    for i in range(classes):
        data = features[:, labels == i]
        # Centralize the data of class i
        centered_data = data - data.mean(1).reshape((-1, 1))
        if covariance_sum is None:
            # First class: initialise the accumulator
            covariance_sum = np.dot(centered_data, centered_data.T)
        else:
            covariance_sum += np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , labels , classes ) -> np.ndarray:
    """Between-class scatter matrix (class-size weighted), averaged over samples.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class labels in ``range(classes)``.
        classes: number of classes.

    Returns:
        (n_features, n_features) matrix: sum over classes of
        n_i * (mu_i - mu)(mu_i - mu)^T divided by the total sample count.

    NOTE(review): reconstructed distinct parameter/local names — the obfuscated
    original was a SyntaxError (duplicate parameters) with undefined locals.
    """
    general_mean = features.mean(1).reshape((-1, 1))
    covariance_sum = None
    for i in range(classes):
        data = features[:, labels == i]
        class_size = data.shape[1]  # n_i: number of samples in class i
        mean_diff = data.mean(1).reshape((-1, 1)) - general_mean
        term = class_size * np.dot(mean_diff, mean_diff.T)
        covariance_sum = term if covariance_sum is None else covariance_sum + term
    return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE__ ( features , dimensions ) -> np.ndarray:
    """Principal Component Analysis projection.

    Args:
        features: array of shape (n_features, n_samples).
        dimensions: number of principal components to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if ``features`` contains no non-zero entry (treated as
        an empty dataset, matching the companion tests).

    NOTE(review): reconstructed distinct local names and ``force=True`` — the
    obfuscated original referenced undefined names.
    """
    # Check if the features have been loaded
    if not features.any():
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
    data_mean = features.mean(1)
    # Center the dataset
    centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
    covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
    _, eigenvectors = np.linalg.eigh(covariance_matrix)
    # Take all the columns in the reverse order (-1), and then takes only the first `dimensions`
    filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
    # Project the database on the new space
    projected_data = np.dot(filtered_eigenvectors.T, features)
    logging.info('Principal Component Analysis computed')
    return projected_data
def SCREAMING_SNAKE_CASE__ ( features , labels , classes , dimensions ) -> np.ndarray:
    """Linear Discriminant Analysis projection.

    Solves the generalized eigenproblem between-class vs. within-class scatter,
    orthonormalizes the top discriminants via SVD, and projects ``features``.

    Args:
        features: array of shape (n_features, n_samples).
        labels: per-sample integer class labels in ``range(classes)``.
        classes: number of classes (must exceed ``dimensions``).
        dimensions: number of discriminant directions to keep.

    Returns:
        Projected data of shape (dimensions, n_samples).

    Raises:
        AssertionError: if ``classes <= dimensions`` or the dataset is empty.

    NOTE(review): reconstructed from the obfuscated original (SyntaxError from
    duplicate parameters, undefined helper names). The original's
    ``if features.any:`` tested the bound method (always truthy) — fixed to
    call it. Scatter helpers are inlined as private functions so the block is
    self-contained.
    """

    def _within_scatter(feats, labs, n_classes):
        # Average within-class scatter: sum_i (X_i - mu_i)(X_i - mu_i)^T / N
        total = None
        for i in range(n_classes):
            data = feats[:, labs == i]
            centered = data - data.mean(1).reshape((-1, 1))
            term = np.dot(centered, centered.T)
            total = term if total is None else total + term
        return total / feats.shape[1]

    def _between_scatter(feats, labs, n_classes):
        # Average between-class scatter: sum_i n_i (mu_i - mu)(mu_i - mu)^T / N
        overall = feats.mean(1).reshape((-1, 1))
        total = None
        for i in range(n_classes):
            data = feats[:, labs == i]
            diff = data.mean(1).reshape((-1, 1)) - overall
            term = data.shape[1] * np.dot(diff, diff.T)
            total = term if total is None else total + term
        return total / feats.shape[1]

    assert classes > dimensions
    # Check if features have been already loaded
    if not features.any():
        logging.basicConfig(level=logging.ERROR, format='%(message)s', force=True)
        logging.error('Dataset empty')
        raise AssertionError
    _, eigenvectors = eigh(
        _between_scatter(features, labels, classes),
        _within_scatter(features, labels, classes),
    )
    filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
    svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
    filtered_svd_matrix = svd_matrix[:, 0:dimensions]
    projected_data = np.dot(filtered_svd_matrix.T, features)
    logging.info('Linear Discriminant Analysis computed')
    return projected_data
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Verify LDA raises AssertionError when dimensions > classes.

    NOTE(review): obfuscation collapsed the distinct locals to
    ``__lowerCamelCase`` and the call arguments to ``lowerCamelCase__``, so as
    written this test references undefined names (the dataset/labels/classes/
    dimensions it builds are never actually passed on).
    """
    # Create dummy dataset with 2 classes and 3 features
    __lowerCamelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    __lowerCamelCase : Optional[int] = np.array([0, 0, 0, 1, 1] )
    __lowerCamelCase : Optional[Any] = 2
    __lowerCamelCase : Tuple = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(lowerCamelCase__ ) as error_info:
        __lowerCamelCase : int = linear_discriminant_analysis(
            lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
        if isinstance(lowerCamelCase__ , np.ndarray ):
            raise AssertionError(
                'Did not raise AssertionError for dimensions > classes' )
    assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Compare PCA output against a precomputed expected projection.

    NOTE(review): same obfuscation breakage as the LDA test above — the
    ``lowerCamelCase__`` references are undefined names, so the prepared
    dataset/dimensions/expected-output locals are never used.
    """
    __lowerCamelCase : Dict = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    __lowerCamelCase : Dict = 2
    __lowerCamelCase : int = np.array([[6.9282_0323, 8.6602_5404, 10.3923_0485], [3.0, 3.0, 3.0]] )
    with pytest.raises(lowerCamelCase__ ) as error_info:
        __lowerCamelCase : Optional[Any] = principal_component_analysis(lowerCamelCase__ , lowerCamelCase__ )
        if not np.allclose(lowerCamelCase__ , lowerCamelCase__ ):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
    # Run any doctests embedded in this module when executed as a script.
    import doctest
    doctest.testmod()
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import scaffolding: heavy submodules are only imported when one of the
# listed names is actually accessed.
# NOTE(review): the original assigned this dict (and the torch-only list) to a
# throwaway name `a`, so the `_import_structure` referenced at the bottom was
# undefined and the constructed _LazyModule was discarded; restored the
# standard transformers pattern.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling objects as well.
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodule imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
# Root logger used by the doc-test harness class below.
a =logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class A_ ( unittest.TestCase ):
    """Runs the repository's doctests over selected source/doc directories.

    NOTE(review): name obfuscation collapsed all parameters of
    ``analyze_directory`` to ``SCREAMING_SNAKE_CASE__`` (duplicate parameter
    names are a SyntaxError) — the body reads what were originally
    directory / identifier / n_identifier / ignore_files / only_modules.
    """

    def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Path ,SCREAMING_SNAKE_CASE__ : Union[str, None] = None ,SCREAMING_SNAKE_CASE__ : Union[List[str], None] = None ,SCREAMING_SNAKE_CASE__ : Union[str, List[str], None] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,):
        """Collect files in a directory (filtered by identifier rules) and run
        their doctests, either as imported modules or as text files."""
        # Keep only regular files directly inside the directory.
        __lowerCamelCase : List[str] = [file for file in os.listdir(SCREAMING_SNAKE_CASE__) if os.path.isfile(os.path.join(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__))]
        if identifier is not None:
            # Whitelist: keep files whose name contains the identifier.
            __lowerCamelCase : str = [file for file in files if identifier in file]
        if n_identifier is not None:
            # Blacklist: drop files matching any of the negative identifiers.
            if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__):
                for n_ in n_identifier:
                    __lowerCamelCase : Optional[int] = [file for file in files if n_ not in file]
            else:
                __lowerCamelCase : Dict = [file for file in files if n_identifier not in file]
        __lowerCamelCase : str = ignore_files or []
        ignore_files.append('__init__.py')
        __lowerCamelCase : Tuple = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,SCREAMING_SNAKE_CASE__)
            if only_modules:
                # Import the file as a transformers submodule and run its DocTestSuite.
                __lowerCamelCase : Optional[int] = file.split('.')[0]
                try:
                    __lowerCamelCase : Optional[Any] = getattr(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : List[str] = doctest.DocTestSuite(SCREAMING_SNAKE_CASE__)
                    __lowerCamelCase : Optional[int] = unittest.TextTestRunner().run(SCREAMING_SNAKE_CASE__)
                    self.assertIs(len(result.failures) ,0)
                except AttributeError:
                    logger.info(F"{module_identifier} is not a module.")
            else:
                # Treat the file as plain text containing doctest examples.
                __lowerCamelCase : int = doctest.testfile(str('..' / directory / file) ,optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed ,0)

    def lowerCAmelCase ( self : List[Any]):
        """Doctest the modeling files (minus the excluded ctrl variants)."""
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = 'modeling'
        __lowerCamelCase : Dict = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Dict):
        """Doctest the tokenization files."""
        __lowerCamelCase : Tuple = Path('src/transformers')
        __lowerCamelCase : Optional[int] = 'tokenization'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Tuple):
        """Doctest the configuration files."""
        __lowerCamelCase : List[Any] = Path('src/transformers')
        __lowerCamelCase : str = 'configuration'
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,identifier=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : int):
        """Doctest everything EXCEPT configuration/modeling/tokenization files."""
        __lowerCamelCase : Dict = Path('src/transformers')
        __lowerCamelCase : Any = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,n_identifier=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : int):
        """Doctest the documentation sources as text files."""
        __lowerCamelCase : List[Any] = Path('docs/source')
        __lowerCamelCase : str = ['favicon.ico']
        self.analyze_directory(SCREAMING_SNAKE_CASE__ ,ignore_files=SCREAMING_SNAKE_CASE__ ,only_modules=SCREAMING_SNAKE_CASE__)
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Lazy-import scaffolding for the UniSpeech model package.
# NOTE(review): the original bound the structure dict and the torch-only list
# to `a`, leaving `_import_structure` (used by _LazyModule below) undefined;
# restored the standard transformers pattern.
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # torch is available: expose the modeling objects as well.
    _import_structure["modeling_unispeech"] = [
        "UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
        "UniSpeechForCTC",
        "UniSpeechForPreTraining",
        "UniSpeechForSequenceClassification",
        "UniSpeechModel",
        "UniSpeechPreTrainedModel",
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_unispeech import (
            UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
            UniSpeechForCTC,
            UniSpeechForPreTraining,
            UniSpeechForSequenceClassification,
            UniSpeechModel,
            UniSpeechPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so submodule imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 652 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Module-level tokenizer constants.
# NOTE(review): the original bound every one of these to the same name `a`,
# leaving `logger`, SPIECE_UNDERLINE and the VOCAB_* maps (all referenced by
# the tokenizer class below) undefined; restored the conventional names.
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model",
    },
    "monolingual_vocab_file": {
        "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024}
class A_ ( SCREAMING_SNAKE_CASE ):
    """BARTpho (syllable) tokenizer: a SentencePiece BPE model restricted to a
    reduced monolingual vocabulary loaded from ``dict.txt``.

    NOTE(review): obfuscation collapsed parameter names to
    ``SCREAMING_SNAKE_CASE__`` (duplicate parameters are a SyntaxError) and
    locals to ``__lowerCamelCase``, so several bodies reference names that are
    never bound; comments below describe the evident intent.
    """

    # Tokenizer metadata expected by the PreTrainedTokenizer machinery.
    _UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    _UpperCAmelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase : Dict = ['''input_ids''', '''attention_mask''']

    def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : List[Any]="<s>" ,SCREAMING_SNAKE_CASE__ : Any="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="</s>" ,SCREAMING_SNAKE_CASE__ : List[str]="<s>" ,SCREAMING_SNAKE_CASE__ : int="<unk>" ,SCREAMING_SNAKE_CASE__ : Dict="<pad>" ,SCREAMING_SNAKE_CASE__ : List[str]="<mask>" ,SCREAMING_SNAKE_CASE__ : Optional[Dict[str, Any]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
        """Load the SentencePiece model plus the reduced monolingual vocab.

        Positional arguments were originally (vocab_file, monolingual_vocab_file)
        followed by the special tokens — TODO confirm against upstream bartpho.
        """
        # Mask token behave like a normal word, i.e. include the space before it
        __lowerCamelCase : Union[str, Any] = AddedToken(SCREAMING_SNAKE_CASE__ ,lstrip=SCREAMING_SNAKE_CASE__ ,rstrip=SCREAMING_SNAKE_CASE__) if isinstance(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__) else mask_token
        __lowerCamelCase : str = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=SCREAMING_SNAKE_CASE__ ,eos_token=SCREAMING_SNAKE_CASE__ ,unk_token=SCREAMING_SNAKE_CASE__ ,sep_token=SCREAMING_SNAKE_CASE__ ,cls_token=SCREAMING_SNAKE_CASE__ ,pad_token=SCREAMING_SNAKE_CASE__ ,mask_token=SCREAMING_SNAKE_CASE__ ,sp_model_kwargs=self.sp_model_kwargs ,**SCREAMING_SNAKE_CASE__ ,)
        __lowerCamelCase : int = vocab_file
        __lowerCamelCase : Tuple = monolingual_vocab_file
        __lowerCamelCase : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(SCREAMING_SNAKE_CASE__))
        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        __lowerCamelCase : Optional[int] = {}
        __lowerCamelCase : List[Any] = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
                __lowerCamelCase : Any = cnt
                cnt += 1
        # Each dict.txt line is "<token> <count>"; only the token is kept.
        with open(SCREAMING_SNAKE_CASE__ ,'r' ,encoding='utf-8') as f:
            for line in f.readlines():
                __lowerCamelCase : Any = line.strip().split()[0]
                __lowerCamelCase : List[str] = len(self.fairseq_tokens_to_ids)
            if str(SCREAMING_SNAKE_CASE__) not in self.fairseq_tokens_to_ids:
                __lowerCamelCase : Dict = len(self.fairseq_tokens_to_ids)
        # Reverse mapping id -> token for decoding.
        __lowerCamelCase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : int):
        """Make the tokenizer picklable: drop the live SentencePiece processor
        and keep its serialized proto instead."""
        __lowerCamelCase : Tuple = self.__dict__.copy()
        __lowerCamelCase : Optional[int] = None
        __lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any]):
        """Rebuild the SentencePiece processor from the serialized proto."""
        __lowerCamelCase : List[str] = d
        # for backward compatibility
        if not hasattr(self ,'sp_model_kwargs'):
            __lowerCamelCase : str = {}
        __lowerCamelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        """Build model inputs with special tokens: <s> A </s> or <s> A </s></s> B </s>."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __lowerCamelCase : Tuple = [self.cls_token_id]
        __lowerCamelCase : int = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None ,SCREAMING_SNAKE_CASE__ : bool = False):
        """Return a 0/1 mask marking special-token positions in the sequence(s)."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=SCREAMING_SNAKE_CASE__ ,token_ids_a=SCREAMING_SNAKE_CASE__ ,already_has_special_tokens=SCREAMING_SNAKE_CASE__)
        if token_ids_a is None:
            return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]
        return [1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1, 1] + ([0] * len(SCREAMING_SNAKE_CASE__)) + [1]

    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : List[int] ,SCREAMING_SNAKE_CASE__ : Optional[List[int]] = None):
        """Token type ids: BARTpho (like BART) uses all zeros for both segments."""
        __lowerCamelCase : Dict = [self.sep_token_id]
        __lowerCamelCase : Union[str, Any] = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]

    @property
    def lowerCAmelCase ( self : List[str]):
        """Size of the reduced (monolingual) vocabulary."""
        return len(self.fairseq_ids_to_tokens)

    def lowerCAmelCase ( self : Dict):
        """Full vocab dict (token -> id), including added tokens."""
        __lowerCamelCase : Optional[int] = {self.convert_ids_to_tokens(SCREAMING_SNAKE_CASE__): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def lowerCAmelCase ( self : Dict ,SCREAMING_SNAKE_CASE__ : str):
        """Tokenize text into SentencePiece pieces."""
        return self.sp_model.encode(SCREAMING_SNAKE_CASE__ ,out_type=SCREAMING_SNAKE_CASE__)

    def lowerCAmelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[Any]):
        """Token -> id via the reduced vocab; unknown tokens map to unk."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Dict):
        """Id -> token via the reduced vocab."""
        return self.fairseq_ids_to_tokens[index]

    def lowerCAmelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : str):
        """Join pieces and turn the SentencePiece underline back into spaces.

        NOTE(review): upstream replaces SPIECE_UNDERLINE with ' '; the
        obfuscated call passes the tokens argument twice.
        """
        __lowerCamelCase : Any = ''.join(SCREAMING_SNAKE_CASE__).replace(SCREAMING_SNAKE_CASE__ ,' ').strip()
        return out_string

    def lowerCAmelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Optional[str] = None):
        """Save the SentencePiece model and the monolingual dict to a directory.

        Returns the two output paths. Copies the originals when they exist on
        disk, otherwise re-serializes from memory.
        """
        if not os.path.isdir(SCREAMING_SNAKE_CASE__):
            logger.error(F"Vocabulary path ({save_directory}) should be a directory")
            return
        __lowerCamelCase : Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        __lowerCamelCase : Union[str, Any] = os.path.join(
            SCREAMING_SNAKE_CASE__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] ,)
        if os.path.abspath(self.vocab_file) != os.path.abspath(SCREAMING_SNAKE_CASE__) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file ,SCREAMING_SNAKE_CASE__)
        elif not os.path.isfile(self.vocab_file):
            with open(SCREAMING_SNAKE_CASE__ ,'wb') as fi:
                __lowerCamelCase : List[str] = self.sp_model.serialized_model_proto()
                fi.write(SCREAMING_SNAKE_CASE__)
        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            SCREAMING_SNAKE_CASE__) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file ,SCREAMING_SNAKE_CASE__)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(SCREAMING_SNAKE_CASE__ ,'w' ,encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(F"{str(SCREAMING_SNAKE_CASE__)} \n")
        return out_vocab_file, out_monolingual_vocab_file
| 652 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to the check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-super-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
# Build a super tiny FSMT en-ru model for test fixtures.
# NOTE(review): the original assigned every value to the same name `a`, so the
# later references (vocab, build_dir, tokenizer, config, ...) were NameErrors;
# restored the distinct variable names the statements evidently use.
mname_tiny = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
vocab = [
    "l",
    "o",
    "w",
    "e",
    "r",
    "s",
    "t",
    "i",
    "d",
    "n",
    "w</w>",
    "r</w>",
    "t</w>",
    "lo",
    "low",
    "er</w>",
    "low</w>",
    "lowest</w>",
    "newer</w>",
    "wider</w>",
    "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    with open(src_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(tgt_vocab_file, "w") as fp:
        fp.write(json.dumps(vocab_tokens))
    with open(merges_file, "w") as fp:
        fp.write("\n".join(merges))
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )
config = FSMTConfig(
    langs=["ru", "en"],
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)
tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")
# Test that the tiny model runs end to end.
batch = tokenizer(["Making tiny model"], return_tensors="pt")
outputs = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"Generated {mname_tiny}")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
| 652 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
def __init__( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=1_3 ,SCREAMING_SNAKE_CASE__ : int=3_0 ,SCREAMING_SNAKE_CASE__ : int=2 ,SCREAMING_SNAKE_CASE__ : List[Any]=3 ,SCREAMING_SNAKE_CASE__ : str=True ,SCREAMING_SNAKE_CASE__ : int=True ,SCREAMING_SNAKE_CASE__ : List[str]=3_2 ,SCREAMING_SNAKE_CASE__ : Any=2 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=4 ,SCREAMING_SNAKE_CASE__ : List[str]=3_7 ,SCREAMING_SNAKE_CASE__ : Optional[Any]="gelu" ,SCREAMING_SNAKE_CASE__ : Optional[int]=0.1 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=1_0 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=None ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,):
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Optional[Any] = batch_size
__lowerCamelCase : Dict = image_size
__lowerCamelCase : Optional[Any] = patch_size
__lowerCamelCase : Optional[Any] = num_channels
__lowerCamelCase : str = is_training
__lowerCamelCase : List[Any] = use_labels
__lowerCamelCase : Any = hidden_size
__lowerCamelCase : Optional[int] = num_hidden_layers
__lowerCamelCase : Any = num_attention_heads
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Dict = hidden_act
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : Dict = type_sequence_label_size
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : List[str] = scope
__lowerCamelCase : Union[str, Any] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : str = num_patches + 2
def lowerCAmelCase ( self : Optional[Any]):
__lowerCamelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
__lowerCamelCase : List[Any] = None
if self.use_labels:
__lowerCamelCase : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
__lowerCamelCase : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase ( self : List[Any]):
return DeiTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def lowerCAmelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Dict):
__lowerCamelCase : Optional[Any] = TFDeiTModel(config=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any):
__lowerCamelCase : Optional[int] = TFDeiTForMaskedImageModeling(config=SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
__lowerCamelCase : int = 1
__lowerCamelCase : Tuple = TFDeiTForMaskedImageModeling(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size))
def lowerCAmelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any]):
__lowerCamelCase : Dict = self.type_sequence_label_size
__lowerCamelCase : List[Any] = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
# test greyscale images
__lowerCamelCase : List[Any] = 1
__lowerCamelCase : Tuple = TFDeiTForImageClassification(SCREAMING_SNAKE_CASE__)
__lowerCamelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
__lowerCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size))
def prepare_config_and_inputs_for_common(self):
    """Repackage ``(config, pixel_values, labels)`` into ``(config, inputs_dict)``.

    Fixes vs. original: every assignment went to ``__lowerCamelCase`` while the
    following lines read the undefined names ``config_and_inputs``, ``config``,
    ``pixel_values`` and ``inputs_dict`` (NameError); the method is invoked as
    ``prepare_config_and_inputs_for_common`` by the test class in this file.

    Returns:
        Tuple of the config object and a dict with the ``pixel_values`` key,
        the shape expected by the common model-tester mixin. Labels are
        intentionally dropped here.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {'pixel_values': pixel_values}
    return config, inputs_dict
@require_tf
class A_(SCREAMING_SNAKE_CASE, unittest.TestCase):
    """Common-behaviour test suite for the TF DeiT model family.

    Fixes vs. original: the obfuscated base list named ``SCREAMING_SNAKE_CASE``
    twice, which raises ``TypeError: duplicate base class`` at class creation
    (NOTE(review): presumably two distinct tester mixins were collapsed into one
    name -- confirm against the un-obfuscated file); every method shared the
    name ``lowerCAmelCase`` so only the last definition survived and unittest
    discovered no tests; ``setUp`` never bound ``self.model_tester`` /
    ``self.config_tester`` although ``self.config_tester`` is used below; the
    four class flags all reused one name so only the last assignment survived.
    Attribute and method names follow what the mixins and the call sites in
    this file actually reference.
    """

    # Model classes exercised by the shared mixin tests.
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    # Task name -> model class mapping consumed by the pipeline tester mixin.
    pipeline_model_mapping = (
        {
            'feature-extraction': TFDeiTModel,
            'image-classification': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )
    # Shared-mixin features not supported by this model family.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        # NOTE(review): the obfuscation lost the ConfigTester keyword values;
        # config_class=DeiTConfig and has_text_modality=False follow the
        # upstream DeiT test -- confirm against the un-obfuscated file.
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=3_7)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='DeiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            output_embeddings = model.get_output_embeddings()
            self.assertTrue(output_embeddings is None or isinstance(output_embeddings, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            # Drop labels for model classes whose call() signature does not
            # accept them (e.g. the "with teacher" classification head).
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration test below.

    Fixes vs. original: the loaded image was bound to ``__lowerCamelCase`` while
    ``return image`` referenced an undefined name (NameError); the ``-> Tuple``
    annotation was wrong (a PIL image is returned, not a tuple); and the caller
    in this file invokes the function as ``prepare_img()``.
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    """Slow integration test: run the distilled DeiT checkpoint on a real image.

    Fixes vs. original: the class was also named ``A_``, silently shadowing the
    mixin test class defined above so its tests never ran (renamed to a unique
    name; nothing in this file referenced the old name); the cached property
    was named ``lowerCAmelCase`` although the test reads
    ``self.default_image_processor``; locals were assigned to
    ``__lowerCamelCase`` while ``model``, ``image_processor``, ``inputs`` and
    ``outputs`` were referenced undefined.
    """

    @cached_property
    def default_image_processor(self):
        # Only build the processor when the vision stack is installed.
        return (
            DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224')
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='tf')

        # forward pass
        outputs = model(**inputs)

        # verify the logits against reference values from the checkpoint
        expected_shape = tf.TensorShape((1, 1_0_0_0))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 652 | 1 |
# NOTE(review): the following trailing lines are dataset-viewer boilerplate that
# leaked into the file and are not Python; commented out so the module parses.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.