| code (string, 81-54k chars) | code_codestyle (int64, 0-721) | style_context (string, 91-41.9k chars) | style_context_codestyle (int64, 0-699) | label (int64, 0-1) |
|---|---|---|---|---|
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: Tuple[str] = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(up_block_types) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
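# Added usage sketch (not part of the original module): a minimal denoising forward
# pass through the 1D UNet above. The constructor arguments are the defaults from the
# signature; the sample length of 256 is an illustrative assumption.
#
#   model = UNet1DModel(in_channels=2, out_channels=2, block_out_channels=(32, 32, 64))
#   noisy_sample = torch.randn(1, 2, 256)               # (batch, channels, length)
#   denoised = model(noisy_sample, timestep=10).sample  # same shape as the input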
| code_codestyle: 40 |
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel

if is_vision_available():
    from transformers import Mask2FormerImageProcessor

if is_vision_available():
    from PIL import Image


class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)

    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="Mask2Former does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Mask2Former does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="Mask2Former is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="Mask2Former does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
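# Added usage sketch (not part of the test file): how the API under test is typically
# driven outside the suite, mirroring prepare_img() above. The checkpoint name is the
# one the integration tests already use; the post-processing call is an assumption
# about the image-processor API.
#
#   processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#   inputs = processor(prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       outputs = model(**inputs)
#   # masks_queries_logits: (batch, num_queries, H // 4, W // 4)
#   # class_queries_logits: (batch, num_queries, num_labels + 1)
#   prediction = processor.post_process_instance_segmentation(outputs)[0]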
| style_context_codestyle: 40 | label: 1 |
from __future__ import annotations

import math
from collections import Counter
from string import ascii_lowercase


def calculate_prob(text: str) -> None:
    single_char_strings, two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())

    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob)  # entropy formula.

    # print entropy
    print(f"{round(-1 * my_fir_sum):.1f}")

    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for ch0 in my_alphas:
        for ch1 in my_alphas:
            sequence = ch0 + ch1
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)

    # print second entropy
    print(f"{round(-1 * my_sec_sum):.1f}")

    # print the difference between them
    print(f"{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}")


def analyze_text(text: str) -> tuple[dict, dict]:
    single_char_strings = Counter()  # type: ignore
    two_char_strings = Counter()  # type: ignore
    single_char_strings[text[-1]] += 1

    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0, len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings


def main():
    import doctest

    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)


if __name__ == "__main__":
    main()
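# Added worked example (not in the original file). For text = "abc ab":
#   analyze_text(text) -> (Counter({'a': 2, 'b': 2, 'c': 1, ' ': 1}),
#                          Counter({'ab': 2, ' a': 2, 'bc': 1, 'c ': 1}))
# calculate_prob(text) then prints the first-order entropy H1 = -sum(p * log2(p))
# over single characters, the second-order entropy H2 over character pairs, and
# the difference H2 - H1.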
| code_codestyle: 40 |
import argparse
import gc
import json
import os

import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed

from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler


MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


# Converting Bytes to Megabytes
def b2mb(x):
    return int(x / 2**20)


# This context manager is used to track the peak memory usage of the process
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")


def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader


def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)


def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
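# Added usage sketch (not part of the original script): the TorchTracemalloc context
# manager defined above can be used on its own. Assumes a CUDA device is available,
# since it reads torch.cuda allocator counters.
#
#   with TorchTracemalloc() as tracemalloc:
#       x = torch.randn(1024, 1024, device="cuda")
#       y = x @ x
#   print(f"delta used/peak {tracemalloc.used}/{tracemalloc.peaked} MB")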
| style_context_codestyle: 40 | label: 1 |
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
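# Added worked example (not in the original file): the heuristic for a node at
# (0, 0) with the goal at (6, 6). With HEURISTIC == 0 (Euclidean) this is
# sqrt(6**2 + 6**2) ~= 8.49; with HEURISTIC == 1 (Manhattan) it would be 12.
#
#   node = Node(pos_x=0, pos_y=0, goal_x=6, goal_y=6, g_cost=0, parent=None)
#   print(node.h_cost)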
| code_codestyle: 40 |
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key and reshape
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value and reshape
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance and reshape
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rwkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )
    args = parser.parse_args()

    convert_rwkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
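# Added usage sketch (not part of the original script). The repo and checkpoint file
# names below are hypothetical; the flags are the ones defined by the parser above:
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth \
#       --output_dir ./rwkv-169m-hf \
#       --size 169M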
| style_context_codestyle: 40 | label: 1 |
from __future__ import annotations

from collections import Counter
from random import random


class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()

        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()

    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)

    visited = Counter(graph.get_nodes())
    node = start

    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1

    return visited


if __name__ == "__main__":
    import doctest

    doctest.testmod()
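# Added usage sketch (not in the original file): a two-state chain sampled for 1000
# steps. Counts start at 1 per node because `visited` is seeded with
# Counter(graph.get_nodes()); exact numbers vary from run to run.
#
#   transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
#   print(get_transitions("a", transitions, 1000))  # e.g. Counter({'a': 838, 'b': 164})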
| code_codestyle: 40 |
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    # compare the input element-wise against 0 and return the maxima
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| style_context_codestyle: 40 | label: 1 |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
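# Note (added): the circuit above applies no gates before measuring, so the qubit
# stays in |0> and the returned counts are deterministic, e.g. {'0': 1000} for the
# 1000 shots configured above.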
| code_codestyle: 40 |
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , float ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , int ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , int ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , bool ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , list ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 1
|
'''simple docstring'''
from __future__ import annotations
import queue
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : str = data
_UpperCAmelCase : Any = None
_UpperCAmelCase : Tuple = None
def __lowerCAmelCase ():
print("\n********Press N to stop entering at any point of time********\n" )
_UpperCAmelCase : Optional[Any] = input("Enter the value of the root node: " ).strip().lower()
_UpperCAmelCase : queue.Queue = queue.Queue()
_UpperCAmelCase : List[Any] = TreeNode(int(__lowerCAmelCase ) )
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : Tuple = q.get()
_UpperCAmelCase : Union[str, Any] = F"""Enter the left node of {node_found.data}: """
_UpperCAmelCase : Any = input(__lowerCAmelCase ).strip().lower() or "n"
if check == "n":
return tree_node
_UpperCAmelCase : List[Any] = TreeNode(int(__lowerCAmelCase ) )
_UpperCAmelCase : List[Any] = left_node
q.put(__lowerCAmelCase )
_UpperCAmelCase : Any = F"""Enter the right node of {node_found.data}: """
_UpperCAmelCase : str = input(__lowerCAmelCase ).strip().lower() or "n"
if check == "n":
return tree_node
_UpperCAmelCase : Dict = TreeNode(int(__lowerCAmelCase ) )
_UpperCAmelCase : List[str] = right_node
q.put(__lowerCAmelCase )
raise RuntimeError("build_tree: queue unexpectedly empty before input ended" )  # not reachable with well-formed input
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end="," )
pre_order(node.left )
pre_order(node.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end="," )
in_order(node.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end="," )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : List[str] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
_UpperCAmelCase : queue.Queue = queue.Queue()
q.put(__lowerCAmelCase )
while not q.empty():
_UpperCAmelCase : Optional[int] = []
while not q.empty():
_UpperCAmelCase : List[Any] = q.get()
print(node_dequeued.data , end="," )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : List[Any] = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end="," )
stack.append(__lowerCAmelCase )
_UpperCAmelCase : Any = n.left
# end of while means current node doesn't have left child
_UpperCAmelCase : str = stack.pop()
# start to traverse its right child
_UpperCAmelCase : List[Any] = n.right
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
_UpperCAmelCase : list[TreeNode] = []
_UpperCAmelCase : Optional[Any] = node
while n or stack:
while n:
stack.append(__lowerCAmelCase )
_UpperCAmelCase : int = n.left
_UpperCAmelCase : int = stack.pop()
print(n.data , end="," )
_UpperCAmelCase : int = n.right
def __lowerCAmelCase (__lowerCAmelCase ):
if not isinstance(node , TreeNode ) or not node:
return
_UpperCAmelCase , _UpperCAmelCase : Dict = [], []
_UpperCAmelCase : Any = node
stacka.append(__lowerCAmelCase )
while stacka: # to find the reversed order of post order, store it in stack2
_UpperCAmelCase : List[Any] = stacka.pop()
if n.left:
stacka.append(n.left )
if n.right:
stacka.append(n.right )
stacka.append(__lowerCAmelCase )
while stacka: # pop up from stack2 will be the post order
print(stacka.pop().data , end="," )
def __lowerCAmelCase (__lowerCAmelCase = "" , __lowerCAmelCase=50 , __lowerCAmelCase="*" ):
if not s:
return "\n" + width * char
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = divmod(width - len(__lowerCAmelCase ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('Binary Tree Traversals'))
lowerCamelCase__ = build_tree()
print(prompt('Pre Order Traversal'))
pre_order(node)
print(prompt() + '\n')
print(prompt('In Order Traversal'))
in_order(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal'))
post_order(node)
print(prompt() + '\n')
print(prompt('Level Order Traversal'))
level_order(node)
print(prompt() + '\n')
print(prompt('Actual Level Order Traversal'))
level_order_actual(node)
print('*' * 50 + '\n')
print(prompt('Pre Order Traversal - Iteration Version'))
pre_order_iter(node)
print(prompt() + '\n')
print(prompt('In Order Traversal - Iteration Version'))
in_order_iter(node)
print(prompt() + '\n')
print(prompt('Post Order Traversal - Iteration Version'))
post_order_iter(node)
print(prompt())
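# --- Illustrative sketch (editor addition, not part of the original module) ---
# A non-interactive check of the traversal orders, using a plain dict in place
# of the interactive build_tree() above: tree 1 -> (2, 3), 2 -> (4, 5).
def _traversal_sketch():
    tree = {1: (2, 3), 2: (4, 5), 3: (None, None), 4: (None, None), 5: (None, None)}

    def pre(n):  # root, left, right
        return [] if n is None else [n, *pre(tree[n][0]), *pre(tree[n][1])]

    def post(n):  # left, right, root
        return [] if n is None else [*post(tree[n][0]), *post(tree[n][1]), n]

    assert pre(1) == [1, 2, 4, 5, 3]
    assert post(1) == [4, 5, 2, 3, 1]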
| 40
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
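# --- Illustrative note (editor addition, not part of the original script) ---
# The loop above copies teacher layers [0, 2, 4, 7, 9, 11] into consecutive
# student slots 0..5, i.e. it rewrites
#   f"{prefix}.encoder.layer.{teacher_idx}.<suffix>"  ->  "...layer.{std_idx}.<suffix>"
_layer_map = dict(zip([0, 2, 4, 7, 9, 11], range(6)))
assert _layer_map[7] == 3  # e.g. teacher layer 7 becomes student layer 3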
| 40
| 1
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
lowerCamelCase__ = random.Random()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=1.0 , __lowerCAmelCase=None , __lowerCAmelCase=None ):
if rng is None:
_UpperCAmelCase : Tuple = global_rng
_UpperCAmelCase : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple=7 , lowerCamelCase__ : List[str]=4_00 , lowerCamelCase__ : List[str]=20_00 , lowerCamelCase__ : Tuple=10 , lowerCamelCase__ : str=1_60 , lowerCamelCase__ : int=8 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=40_00 , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Tuple=True , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : int = parent
_UpperCAmelCase : Union[str, Any] = batch_size
_UpperCAmelCase : Optional[int] = min_seq_length
_UpperCAmelCase : Any = max_seq_length
_UpperCAmelCase : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
_UpperCAmelCase : Tuple = padding_value
_UpperCAmelCase : Union[str, Any] = sampling_rate
_UpperCAmelCase : List[str] = return_attention_mask
_UpperCAmelCase : Dict = do_normalize
_UpperCAmelCase : Optional[Any] = feature_size
_UpperCAmelCase : Optional[int] = chunk_length
_UpperCAmelCase : List[Any] = hop_length
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Tuple=False ) ->Optional[int]:
'''simple docstring'''
def _flatten(lowerCamelCase__ : str ):
return list(itertools.chain(*lowerCamelCase__ ) )
if equal_length:
_UpperCAmelCase : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
_UpperCAmelCase : Optional[Any] = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
_UpperCAmelCase : Tuple = [np.asarray(lowerCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Dict = WhisperFeatureExtractor if is_speech_available() else None
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[str] = WhisperFeatureExtractionTester(self )
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Dict = feat_extract_first.save_pretrained(lowerCamelCase__ )[0]
check_json_file_has_correct_format(lowerCamelCase__ )
_UpperCAmelCase : int = self.feature_extraction_class.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : Dict = feat_extract_first.to_dict()
_UpperCAmelCase : Any = feat_extract_second.to_dict()
_UpperCAmelCase : List[Any] = feat_extract_first.mel_filters
_UpperCAmelCase : Dict = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase : Dict = os.path.join(lowerCamelCase__ , "feat_extract.json" )
feat_extract_first.to_json_file(lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.feature_extraction_class.from_json_file(lowerCamelCase__ )
_UpperCAmelCase : Tuple = feat_extract_first.to_dict()
_UpperCAmelCase : Optional[Any] = feat_extract_second.to_dict()
_UpperCAmelCase : Optional[int] = feat_extract_first.mel_filters
_UpperCAmelCase : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
_UpperCAmelCase : Tuple = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
# Test feature size
_UpperCAmelCase : List[Any] = feature_extractor(lowerCamelCase__ , padding="max_length" , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
_UpperCAmelCase : Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
_UpperCAmelCase : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test batched
_UpperCAmelCase : int = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
_UpperCAmelCase : List[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
_UpperCAmelCase : Dict = np.asarray(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
_UpperCAmelCase : str = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
# Test truncation required
_UpperCAmelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
_UpperCAmelCase : Any = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs]
_UpperCAmelCase : str = [x[: feature_extractor.n_samples] for x in speech_inputs]
_UpperCAmelCase : int = [np.asarray(lowerCamelCase__ ) for speech_input in speech_inputs_truncated]
_UpperCAmelCase : List[str] = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
_UpperCAmelCase : Optional[Any] = feature_extractor(lowerCamelCase__ , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(lowerCamelCase__ , lowerCamelCase__ ):
self.assertTrue(np.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
import torch
_UpperCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : str = np.random.rand(1_00 , 32 ).astype(np.float64 )
_UpperCAmelCase : List[Any] = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_UpperCAmelCase : int = feature_extractor.pad([{"input_features": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_features.dtype == np.float32 )
_UpperCAmelCase : List[Any] = feature_extractor.pad([{"input_features": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
_UpperCAmelCase : Tuple = ds.sort("id" ).select(range(lowerCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
# fmt: off
_UpperCAmelCase : Optional[int] = torch.tensor(
[
0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
-0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
] )
# fmt: on
_UpperCAmelCase : Union[str, Any] = self._load_datasamples(1 )
_UpperCAmelCase : int = WhisperFeatureExtractor()
_UpperCAmelCase : List[Any] = feature_extractor(lowerCamelCase__ , return_tensors="pt" ).input_features
self.assertEqual(input_features.shape , (1, 80, 30_00) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCamelCase__ , atol=1E-4 ) )
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_UpperCAmelCase : List[str] = self._load_datasamples(1 )[0]
_UpperCAmelCase : Optional[int] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue
_UpperCAmelCase : List[str] = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCamelCase__ )[0]
self.assertTrue(np.all(np.mean(lowerCamelCase__ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(lowerCamelCase__ ) - 1 ) < 1E-3 ) )
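# --- Illustrative sketch (editor addition, not part of the original tests) ---
# What the zero-mean / unit-variance assertions above exercise, in plain numpy:
def _zero_mean_unit_var_sketch(x, eps=1e-7):
    return (x - x.mean()) / np.sqrt(x.var() + eps)
# For any input scale (including the [0, 65535] rescaling above), the output
# has mean ~0 and variance ~1, e.g. _zero_mean_unit_var_sketch(np.arange(8.0)).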
| 40
|
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(target_vertex_parent ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
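# --- Illustrative sketch (editor addition, not part of the original module) ---
# shortest_path() walks the parent map recursively; the same walk done
# iteratively, assuming `parent` maps each vertex to its BFS parent as the
# breadth-first search above is meant to fill it in:
def _path_from_parents(parent, source, target):
    path = [target]
    while path[-1] != source:
        prev = parent.get(path[-1])
        if prev is None:
            raise ValueError(f"No path from vertex: {source} to vertex: {target}")
        path.append(prev)
    return path[::-1]  # e.g. source "G", target "D" -> ["G", "C", "A", "B", "D"]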
| 40
| 1
|
'''simple docstring'''
from collections import Counter
from timeit import timeit
def __lowerCAmelCase (__lowerCAmelCase = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __lowerCAmelCase (__lowerCAmelCase = "" ):
if len(__lowerCAmelCase ) == 0:
return True
_UpperCAmelCase : Tuple = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase : dict[str, int] = {}
for character in lower_case_input_str:
_UpperCAmelCase : int = character_freq_dict.get(__lowerCAmelCase , 0 ) + 1
_UpperCAmelCase : Dict = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCAmelCase (__lowerCAmelCase = "" ):
print("\nFor string = " , __lowerCAmelCase , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(__lowerCAmelCase ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
lowerCamelCase__ = input(
'Enter string to determine if it can be rearranged as a palindrome or not: '
).strip()
benchmark(check_str)
lowerCamelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
| 40
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
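# --- Illustrative note (editor addition, not part of the original processor) ---
# Hypothetical usage of a Blip2-style processor like the one above; the class
# and checkpoint names below are assumptions for illustration only and are not
# taken from this file:
#   processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#   inputs = processor(images=image, text="What is shown?", return_tensors="pt")
#   # -> a BatchEncoding with input_ids / attention_mask plus pixel_values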
| 40
| 1
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = ["image_processor", "tokenizer"]
lowerCAmelCase : Tuple = "CLIPImageProcessor"
lowerCAmelCase : Dict = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")
def __init__( self : int , lowerCamelCase__ : Tuple=None , lowerCamelCase__ : Any=None , **lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , lowerCamelCase__ , )
_UpperCAmelCase : List[str] = kwargs.pop("feature_extractor" )
_UpperCAmelCase : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
def __call__( self : List[Any] , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : int=None , lowerCamelCase__ : int=None , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
_UpperCAmelCase : Union[str, Any] = self.tokenizer(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if images is not None:
_UpperCAmelCase : Union[str, Any] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None and images is not None:
_UpperCAmelCase : Union[str, Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase__ ) , tensor_type=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.tokenizer.model_input_names
_UpperCAmelCase : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 40
|
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase ): # noqa: E741
_UpperCAmelCase : List[str] = len(__lowerCAmelCase )
_UpperCAmelCase : str = 0
_UpperCAmelCase : List[str] = [0] * n
_UpperCAmelCase : int = [False] * n
_UpperCAmelCase : Dict = [False] * n
def dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if parent == root:
out_edge_count += 1
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : str = at
for to in l[at]:
if to == parent:
pass
elif not visited[to]:
_UpperCAmelCase : List[str] = dfs(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Tuple = min(low[at] , low[to] )
# AP found via bridge
if at < low[to]:
_UpperCAmelCase : Dict = True
# AP found via cycle
if at == low[to]:
_UpperCAmelCase : Dict = True
else:
_UpperCAmelCase : Optional[int] = min(low[at] , __lowerCAmelCase )
return out_edge_count
for i in range(__lowerCAmelCase ):
if not visited[i]:
_UpperCAmelCase : str = 0
_UpperCAmelCase : Tuple = dfs(__lowerCAmelCase , __lowerCAmelCase , -1 , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = out_edge_count > 1
for x in range(len(__lowerCAmelCase ) ):
if is_art[x] is True:
print(__lowerCAmelCase )
# Adjacency list of graph
lowerCamelCase__ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
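# --- Illustrative sketch (editor addition, not part of the original module) ---
# A compact, self-contained articulation-point finder over the same adjacency
# dict, shown separately from compute_ap() above:
def _articulation_points_sketch(graph):
    n = len(graph)
    visited, order, low, art = [False] * n, [0] * n, [0] * n, set()
    timer = [0]

    def dfs(at, parent):
        visited[at] = True
        order[at] = low[at] = timer[0]
        timer[0] += 1
        children = 0
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], order[to])  # back edge
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                children += 1
                if parent != -1 and low[to] >= order[at]:
                    art.add(at)  # non-root cut vertex
        if parent == -1 and children > 1:
            art.add(at)  # root with two or more DFS children

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return art
# _articulation_points_sketch(data) -> {2, 3, 5} for the graph above.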
| 40
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
def __init__( self : Any , lowerCamelCase__ : Any ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = data
_UpperCAmelCase : Node | None = None
class lowerCAmelCase__ :
def __init__( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = None
_UpperCAmelCase : Any = None
def __iter__( self : List[str] ) ->Iterator[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.head
while self.head:
yield node.data
_UpperCAmelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self : Any ) ->int:
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self : int ) ->str:
'''simple docstring'''
return "->".join(str(lowerCamelCase__ ) for item in iter(self ) )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Any ) ->None:
'''simple docstring'''
self.insert_nth(len(self ) , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Any ) ->None:
'''simple docstring'''
self.insert_nth(0 , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : int , lowerCamelCase__ : Any ) ->None:
'''simple docstring'''
if index < 0 or index > len(self ):
raise IndexError("list index out of range." )
_UpperCAmelCase : List[Any] = Node(lowerCamelCase__ )
if self.head is None:
_UpperCAmelCase : Optional[int] = new_node # first node points itself
_UpperCAmelCase : List[str] = new_node
elif index == 0: # insert at head
_UpperCAmelCase : Dict = self.head
_UpperCAmelCase : List[str] = new_node
else:
_UpperCAmelCase : Dict = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : Dict = temp.next
_UpperCAmelCase : int = temp.next
_UpperCAmelCase : Optional[int] = new_node
if index == len(self ) - 1: # insert at tail
_UpperCAmelCase : Union[str, Any] = new_node
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
return self.delete_nth(0 )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Any:
'''simple docstring'''
return self.delete_nth(len(self ) - 1 )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int = 0 ) ->Any:
'''simple docstring'''
if not 0 <= index < len(self ):
raise IndexError("list index out of range." )
_UpperCAmelCase : int = self.head
if self.head == self.tail: # just one node
_UpperCAmelCase : List[Any] = None
elif index == 0: # delete head node
_UpperCAmelCase : Union[str, Any] = self.tail.next.next
_UpperCAmelCase : str = self.head.next
else:
_UpperCAmelCase : Optional[int] = self.head
for _ in range(index - 1 ):
_UpperCAmelCase : Tuple = temp.next
_UpperCAmelCase : Dict = temp.next
_UpperCAmelCase : Union[str, Any] = temp.next.next
if index == len(self ) - 1: # delete at tail
_UpperCAmelCase : List[str] = temp
return delete_node.data
def lowerCAmelCase__ ( self : Tuple ) ->bool:
'''simple docstring'''
return len(self ) == 0
def __lowerCAmelCase ():
_UpperCAmelCase : Tuple = CircularLinkedList()
assert len(__lowerCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__lowerCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__lowerCAmelCase ) == i
circular_linked_list.insert_nth(__lowerCAmelCase , i + 1 )
assert str(__lowerCAmelCase ) == "->".join(str(__lowerCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(__lowerCAmelCase ) == "->".join(str(__lowerCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(__lowerCAmelCase ) == "->".join(str(__lowerCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__lowerCAmelCase ) == "->".join(str(__lowerCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(__lowerCAmelCase ) == "->".join(str(__lowerCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
|
'''simple docstring'''
def __lowerCAmelCase ():
_UpperCAmelCase : str = 0
for i in range(1 , 1_001 ):
total += i**i
return str(__lowerCAmelCase )[-10:]
if __name__ == "__main__":
print(solution())
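# --- Illustrative sketch (editor addition, not part of the original solution) ---
# The same last-ten-digits answer without materialising the ~3000-digit sum,
# via built-in modular exponentiation:
def _self_powers_mod_sketch():
    mod = 10**10
    return f"{sum(pow(i, i, mod) for i in range(1, 1_001)) % mod:010d}"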
| 40
| 1
|
'''simple docstring'''
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase__ = HfArgumentParser(InitializationArguments)
lowerCamelCase__ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase__ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase__ = {
'vocab_size': len(tokenizer),
'scale_attn_by_inverse_layer_idx': True,
'reorder_and_upcast_attn': True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase__ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase__ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 40
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__lowerCAmelCase , __lowerCAmelCase ) ) )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
if dataset.ndim != value_array.ndim:
_UpperCAmelCase : Optional[Any] = (
"Wrong input data's dimensions... "
F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(__lowerCAmelCase )
try:
if dataset.shape[1] != value_array.shape[1]:
_UpperCAmelCase : Optional[int] = (
"Wrong input data's shape... "
F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(__lowerCAmelCase )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
_UpperCAmelCase : Union[str, Any] = (
"Input data have different datatype... "
F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = []
for value in value_array:
_UpperCAmelCase : List[str] = euclidean(__lowerCAmelCase , dataset[0] )
_UpperCAmelCase : Dict = dataset[0].tolist()
for dataset_value in dataset[1:]:
_UpperCAmelCase : int = euclidean(__lowerCAmelCase , __lowerCAmelCase )
if dist > temp_dist:
_UpperCAmelCase : Tuple = temp_dist
_UpperCAmelCase : Dict = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return np.dot(__lowerCAmelCase , __lowerCAmelCase ) / (norm(__lowerCAmelCase ) * norm(__lowerCAmelCase ))
if __name__ == "__main__":
import doctest
doctest.testmod()
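# --- Illustrative sketch (editor addition, not part of the original module) ---
# The Python-level double loop above is O(m * n) with per-pair calls; the same
# nearest-neighbour query, vectorised with numpy broadcasting:
def _nearest_neighbour_sketch(dataset, value_array):
    # (m, 1, d) - (1, n, d) broadcasts to an (m, n) distance matrix
    dists = np.linalg.norm(value_array[:, None, :] - dataset[None, :, :], axis=2)
    idx = dists.argmin(axis=1)
    return [[dataset[i].tolist(), float(dists[row, i])] for row, i in enumerate(idx)]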
| 40
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[int] = "mgp-str"
def __init__( self : Union[str, Any] , lowerCamelCase__ : List[str]=[32, 1_28] , lowerCamelCase__ : Tuple=4 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : Union[str, Any]=27 , lowerCamelCase__ : int=38 , lowerCamelCase__ : Union[str, Any]=5_02_57 , lowerCamelCase__ : Tuple=3_05_22 , lowerCamelCase__ : Optional[int]=7_68 , lowerCamelCase__ : str=12 , lowerCamelCase__ : Optional[int]=12 , lowerCamelCase__ : Optional[int]=4.0 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : List[Any]=1E-5 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : str=0.0 , lowerCamelCase__ : List[str]=0.0 , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Optional[int]=0.0_2 , **lowerCamelCase__ : List[str] , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCamelCase__ )
_UpperCAmelCase : Tuple = image_size
_UpperCAmelCase : int = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : List[Any] = max_token_length
_UpperCAmelCase : str = num_character_labels
_UpperCAmelCase : Optional[Any] = num_bpe_labels
_UpperCAmelCase : Optional[int] = num_wordpiece_labels
_UpperCAmelCase : str = hidden_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : Optional[int] = num_attention_heads
_UpperCAmelCase : Optional[Any] = mlp_ratio
_UpperCAmelCase : Optional[int] = distilled
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Optional[Any] = drop_rate
_UpperCAmelCase : Dict = qkv_bias
_UpperCAmelCase : Union[str, Any] = attn_drop_rate
_UpperCAmelCase : Dict = drop_path_rate
_UpperCAmelCase : Dict = output_aa_attentions
_UpperCAmelCase : Optional[Any] = initializer_range
| 40
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
_UpperCAmelCase : Optional[Any] = self.diffusers_dir
shutil.copy(
os.path.join(lowerCamelCase__ , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any=None ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase : Tuple = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
_UpperCAmelCase : Optional[Any] = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
_UpperCAmelCase : Tuple = black.format_str(lowerCamelCase__ , mode=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = os.path.join(self.diffusers_dir , "new_code.py" )
with open(lowerCamelCase__ , "w" , newline="\n" ) as f:
f.write(lowerCamelCase__ )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(lowerCamelCase__ ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=lowerCamelCase__ )
with open(lowerCamelCase__ , "r" ) as f:
self.assertTrue(f.read() , lowerCamelCase__ )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , lowerCamelCase__ , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
# Copy consistency with a really long name
_UpperCAmelCase : int = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("Bert" , lowerCamelCase__ , lowerCamelCase__ ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , lowerCamelCase__ , overwrite_result=re.sub("DDPM" , "Test" , lowerCamelCase__ ) , )
| 40
| 1
|
'''simple docstring'''
from math import isclose, sqrt
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = point_y / 4 / point_x
_UpperCAmelCase : List[str] = 2 * normal_gradient / (1 + normal_gradient * normal_gradient)
_UpperCAmelCase : List[Any] = (1 - normal_gradient * normal_gradient) / (
1 + normal_gradient * normal_gradient
)
_UpperCAmelCase : Union[str, Any] = (sa - ca * incoming_gradient) / (ca + sa * incoming_gradient)
# to find the next point, solve the simultaneous equations:
# y^2 + 4x^2 = 100
# y - b = m * (x - a)
# ==> A x^2 + B x + C = 0
_UpperCAmelCase : Tuple = outgoing_gradient**2 + 4
_UpperCAmelCase : Optional[int] = 2 * outgoing_gradient * (point_y - outgoing_gradient * point_x)
_UpperCAmelCase : Optional[int] = (point_y - outgoing_gradient * point_x) ** 2 - 100
_UpperCAmelCase : str = (
-linear_term - sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
_UpperCAmelCase : Optional[int] = (
-linear_term + sqrt(linear_term**2 - 4 * quadratic_term * constant_term )
) / (2 * quadratic_term)
# two solutions, one of which is our input point
_UpperCAmelCase : List[str] = x_minus if isclose(__lowerCAmelCase , __lowerCAmelCase ) else x_plus
_UpperCAmelCase : Tuple = point_y + outgoing_gradient * (next_x - point_x)
return next_x, next_y, outgoing_gradient
def __lowerCAmelCase (__lowerCAmelCase = 1.4 , __lowerCAmelCase = -9.6 ):
_UpperCAmelCase : int = 0
_UpperCAmelCase : float = first_x_coord
_UpperCAmelCase : float = first_y_coord
_UpperCAmelCase : float = (1_0.1 - point_y) / (0.0 - point_x)
while not (-0.0_1 <= point_x <= 0.0_1 and point_y > 0):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = next_point(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
num_reflections += 1
return num_reflections
if __name__ == "__main__":
print(F'''{solution() = }''')
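# --- Illustrative sketch (editor addition, not part of the original solution) ---
# Invariant behind next_point(): every bounce stays on the ellipse
# 4x^2 + y^2 = 100. A quick membership check:
def _on_ellipse_sketch(x, y, tol=1e-9):
    return isclose(4 * x * x + y * y, 100.0, abs_tol=tol)
assert _on_ellipse_sketch(1.4, -9.6)  # the beam's entry point: 4*1.96 + 92.16 == 100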
| 40
|
'''simple docstring'''
from math import factorial
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = real
if isinstance(rank , int ):
_UpperCAmelCase : Any = [1] * rank
else:
_UpperCAmelCase : Dict = rank
def __repr__( self : str ) ->List[str]:
'''simple docstring'''
return (
F"""{self.real}+"""
F"""{'+'.join(str(lowerCamelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
)
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCamelCase__ )
def __add__( self : Dict , lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
if not isinstance(other , Dual ):
return Dual(self.real + other , self.duals )
_UpperCAmelCase : Optional[int] = self.duals.copy()
_UpperCAmelCase : Optional[int] = other.duals.copy()
if len(lowerCamelCase__ ) > len(lowerCamelCase__ ):
o_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
elif len(lowerCamelCase__ ) < len(lowerCamelCase__ ):
s_dual.extend([1] * (len(lowerCamelCase__ ) - len(lowerCamelCase__ )) )
_UpperCAmelCase : Union[str, Any] = []
for i in range(len(lowerCamelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCamelCase__ )
lowerCAmelCase : Tuple = __add__
def __sub__( self : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
return self + other * -1
def __mul__( self : List[str] , lowerCamelCase__ : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(other , Dual ):
_UpperCAmelCase : Optional[int] = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCamelCase__ )
lowerCAmelCase : Union[str, Any] = __mul__
def __truediv__( self : Optional[Any] , lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if not isinstance(other , Dual ):
_UpperCAmelCase : Union[str, Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCamelCase__ )
raise ValueError
def __floordiv__( self : str , lowerCamelCase__ : str ) ->List[str]:
'''simple docstring'''
if not isinstance(other , Dual ):
_UpperCAmelCase : Tuple = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCamelCase__ )
raise ValueError
def __pow__( self : Tuple , lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
if n < 0 or isinstance(n , float ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
_UpperCAmelCase : str = self
for _ in range(n - 1 ):
x *= self
return x
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not callable(__lowerCAmelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(__lowerCAmelCase , (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(order , int ):
raise ValueError("differentiate() requires an int as input for order" )
_UpperCAmelCase : int = Dual(__lowerCAmelCase , 1 )
_UpperCAmelCase : Optional[int] = func(__lowerCAmelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __lowerCAmelCase (__lowerCAmelCase ):
return y**2 * y**4
print(differentiate(f, 9, 2))
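# --- Illustrative sketch (editor addition, not part of the original module) ---
# The Dual class above is forward-mode autodiff; here is the first-order core
# of the idea in miniature, with only the product rule:
class _MiniDual:
    def __init__(self, real, dual):
        self.real, self.dual = real, dual

    def __mul__(self, other):
        # (a + a'e)(b + b'e) = ab + (a'b + ab')e, since e^2 = 0
        return _MiniDual(self.real * other.real, self.dual * other.real + self.real * other.dual)

_x = _MiniDual(2.0, 1.0)  # seed dx/dx = 1
assert (_x * _x * _x).dual == 12.0  # d/dx x**3 at x = 2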
| 40
| 1
|
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = {}
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Any = super().add_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
if num_added_tokens == 0:
raise ValueError(
F"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
" `placeholder_token` that is not already in the tokenizer." )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict , *lowerCamelCase__ : Optional[int] , lowerCamelCase__ : str=1 , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Tuple = []
if num_vec_per_token == 1:
self.try_adding_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
output.append(lowerCamelCase__ )
else:
_UpperCAmelCase : Optional[int] = []
for i in range(lowerCamelCase__ ):
_UpperCAmelCase : List[Any] = placeholder_token + F"""_{i}"""
self.try_adding_tokens(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
output.append(lowerCamelCase__ )
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
F"""The tokenizer already has placeholder token {token} that can get confused with"""
F""" {placeholder_token}keep placeholder tokens independent""" )
_UpperCAmelCase : Union[str, Any] = output
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any]=False , lowerCamelCase__ : Any=1.0 ) ->Dict:
'''simple docstring'''
if isinstance(text , list ):
_UpperCAmelCase : Tuple = []
for i in range(len(lowerCamelCase__ ) ):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=lowerCamelCase__ ) )
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
_UpperCAmelCase : int = self.token_map[placeholder_token]
_UpperCAmelCase : List[str] = tokens[: 1 + int(len(lowerCamelCase__ ) * prop_tokens_to_load )]
if vector_shuffle:
_UpperCAmelCase : str = copy.copy(lowerCamelCase__ )
random.shuffle(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = text.replace(lowerCamelCase__ , " ".join(lowerCamelCase__ ) )
return text
def __call__( self : int , lowerCamelCase__ : Tuple , *lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : str=1.0 , **lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
return super().__call__(
self.replace_placeholder_tokens_in_text(
lowerCamelCase__ , vector_shuffle=lowerCamelCase__ , prop_tokens_to_load=lowerCamelCase__ ) , *lowerCamelCase__ , **lowerCamelCase__ , )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict , *lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Any=1.0 , **lowerCamelCase__ : str ) ->str:
'''simple docstring'''
return super().encode(
self.replace_placeholder_tokens_in_text(
lowerCamelCase__ , vector_shuffle=lowerCamelCase__ , prop_tokens_to_load=lowerCamelCase__ ) , *lowerCamelCase__ , **lowerCamelCase__ , )
| 40
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
from transformers.testing_utils import pytest_terminal_summary_main
_UpperCAmelCase : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(__lowerCAmelCase , id=__lowerCAmelCase )
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if index == number_of_items:
return 0
_UpperCAmelCase : int = 0
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Tuple = knapsack(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , index + 1 )
if weights[index] <= max_weight:
_UpperCAmelCase : List[Any] = values[index] + knapsack(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , max_weight - weights[index] , index + 1 )
return max(__lowerCAmelCase , __lowerCAmelCase )
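# Worked example (hypothetical inputs): with weights [3, 2, 1], values [5, 4, 8],
# number_of_items=3, max_weight=5 and index=0, the recursion compares skipping vs.
# taking each item and returns 13 (items 0 and 2: weight 3 + 1 <= 5, value 5 + 8).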
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCAmelCase__ ( unittest.TestCase ):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
[[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.intaa )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
[[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import _LazyModule
lowerCamelCase__ = {'tokenization_tapex': ['TapexTokenizer']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/resolve/main/config.json',
'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/config.json',
'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json',
'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json',
'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/config.json',
'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json',
}
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = "bloom"
lowerCAmelCase : Any = ["past_key_values"]
lowerCAmelCase : Union[str, Any] = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self : Any , lowerCamelCase__ : List[Any]=25_08_80 , lowerCamelCase__ : List[str]=64 , lowerCamelCase__ : List[str]=2 , lowerCamelCase__ : List[Any]=8 , lowerCamelCase__ : Any=1E-5 , lowerCamelCase__ : str=0.0_2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : str=1 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Any=False , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=0.0 , lowerCamelCase__ : Optional[Any]=1 , lowerCamelCase__ : List[str]=False , **lowerCamelCase__ : Union[str, Any] , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Dict = vocab_size
# Backward compatibility with n_embed kwarg
_UpperCAmelCase : Optional[Any] = kwargs.pop("n_embed" , lowerCamelCase__ )
_UpperCAmelCase : int = hidden_size if n_embed is None else n_embed
_UpperCAmelCase : Optional[Any] = n_layer
_UpperCAmelCase : Dict = n_head
_UpperCAmelCase : Union[str, Any] = layer_norm_epsilon
_UpperCAmelCase : List[Any] = initializer_range
_UpperCAmelCase : Optional[Any] = use_cache
_UpperCAmelCase : Dict = pretraining_tp
_UpperCAmelCase : int = apply_residual_connection_post_layernorm
_UpperCAmelCase : str = hidden_dropout
_UpperCAmelCase : List[Any] = attention_dropout
_UpperCAmelCase : int = bos_token_id
_UpperCAmelCase : List[str] = eos_token_id
_UpperCAmelCase : List[str] = slow_but_exact
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Optional[Any] = version.parse("1.12" )
def __init__( self : List[Any] , lowerCamelCase__ : PretrainedConfig , lowerCamelCase__ : str = "default" , lowerCamelCase__ : List[PatchingSpec] = None , lowerCamelCase__ : bool = False , ) ->int:
'''simple docstring'''
super().__init__(lowerCamelCase__ , task=lowerCamelCase__ , patching_specs=lowerCamelCase__ , use_past=lowerCamelCase__ )
if not getattr(self._config , "pad_token_id" , lowerCamelCase__ ):
# TODO: how to do that better?
_UpperCAmelCase : Union[str, Any] = 0
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
_UpperCAmelCase : int = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(lowerCamelCase__ , direction="inputs" , inverted_values_shape=lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {0: "batch", 1: "past_sequence + sequence"}
else:
_UpperCAmelCase : Optional[int] = {0: "batch", 1: "sequence"}
return common_inputs
@property
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
return self._config.n_layer
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
return self._config.n_head
@property
def lowerCAmelCase__ ( self : Any ) ->float:
'''simple docstring'''
return 1E-3
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : "PreTrainedTokenizer" , lowerCamelCase__ : int = -1 , lowerCamelCase__ : int = -1 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Optional["TensorType"] = None , ) ->Mapping[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = super(lowerCamelCase__ , self ).generate_dummy_inputs(
lowerCamelCase__ , batch_size=lowerCamelCase__ , seq_length=lowerCamelCase__ , is_pair=lowerCamelCase__ , framework=lowerCamelCase__ )
        # We need to order the inputs in the way they appear in the forward()
_UpperCAmelCase : Optional[int] = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase : List[str] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_UpperCAmelCase : Optional[Any] = seqlen + 2
_UpperCAmelCase : int = self._config.hidden_size // self.num_attention_heads
_UpperCAmelCase : Optional[int] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
_UpperCAmelCase : str = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
_UpperCAmelCase : Optional[int] = [
(torch.zeros(lowerCamelCase__ ), torch.zeros(lowerCamelCase__ )) for _ in range(self.num_layers )
]
_UpperCAmelCase : str = common_inputs["attention_mask"]
if self.use_past:
_UpperCAmelCase : str = ordered_inputs["attention_mask"].dtype
_UpperCAmelCase : Optional[Any] = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(lowerCamelCase__ , lowerCamelCase__ , dtype=lowerCamelCase__ )] , dim=1 )
return ordered_inputs
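    # Note on the dummy cache built above: each past_key_values entry is a (key, value)
    # pair of zeros, with keys shaped (batch * n_head, head_dim, past_len) and values
    # shaped (batch * n_head, past_len, head_dim), i.e. BLOOM's inverted value layout.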
@property
def lowerCAmelCase__ ( self : List[Any] ) ->int:
'''simple docstring'''
return 13
'''simple docstring'''
import os
def __lowerCAmelCase ():
_UpperCAmelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , "num.txt" )
with open(__lowerCAmelCase ) as file_hand:
return str(sum(int(__lowerCAmelCase ) for line in file_hand ) )[:10]
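# Minimal sketch of the same idea without the data file (toy numbers, not the real input):
# str(sum([5_000_000_000, 4_999_999_999]))[:10] == "9999999999"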
if __name__ == "__main__":
print(solution())
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : jnp.ndarray
lowerCAmelCase : jnp.ndarray
class lowerCAmelCase__ ( nn.Module ):
lowerCAmelCase : int
lowerCAmelCase : Tuple[int] = (16, 32, 96, 256)
lowerCAmelCase : jnp.dtype = jnp.floataa
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
_UpperCAmelCase : Tuple = []
for i in range(len(self.block_out_channels ) - 1 ):
_UpperCAmelCase : Union[str, Any] = self.block_out_channels[i]
_UpperCAmelCase : List[Any] = self.block_out_channels[i + 1]
_UpperCAmelCase : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
_UpperCAmelCase : Tuple = nn.Conv(
lowerCamelCase__ , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = blocks
_UpperCAmelCase : Optional[Any] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Optional[Any] , lowerCamelCase__ : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.conv_in(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = nn.silu(lowerCamelCase__ )
for block in self.blocks:
_UpperCAmelCase : str = block(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = nn.silu(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = self.conv_out(lowerCamelCase__ )
return embedding
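# Shape sketch for the conditioning embedding above (illustrative): with the default
# (16, 32, 96, 256) block channels there are three stride-2 convs, so a (1, 512, 512, 3)
# NHWC conditioning image comes out as (1, 64, 64, conditioning_embedding_channels),
# an 8x spatial downsample that matches the UNet latent resolution.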
@flax_register_to_config
class lowerCAmelCase__ ( nn.Module , UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = 32
lowerCAmelCase : int = 4
lowerCAmelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase : Union[bool, Tuple[bool]] = False
lowerCAmelCase : Tuple[int] = (320, 640, 1_280, 1_280)
lowerCAmelCase : int = 2
lowerCAmelCase : Union[int, Tuple[int]] = 8
lowerCAmelCase : Optional[Union[int, Tuple[int]]] = None
lowerCAmelCase : int = 1_280
lowerCAmelCase : float = 0.0
lowerCAmelCase : bool = False
lowerCAmelCase : jnp.dtype = jnp.floataa
lowerCAmelCase : bool = True
lowerCAmelCase : int = 0
lowerCAmelCase : str = "rgb"
lowerCAmelCase : Tuple[int] = (16, 32, 96, 256)
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : jax.random.KeyArray ) ->FrozenDict:
'''simple docstring'''
_UpperCAmelCase : str = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCAmelCase : Optional[Any] = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
_UpperCAmelCase : Tuple = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCAmelCase : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCAmelCase : Optional[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
_UpperCAmelCase : int = jnp.zeros(lowerCamelCase__ , dtype=jnp.floataa )
_UpperCAmelCase , _UpperCAmelCase : List[Any] = jax.random.split(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )["params"]
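    # Sketch of what the init pass above traces (shapes implied by the code): a zero
    # latent of (1, in_channels, sample_size, sample_size), one integer timestep, zero
    # text embeddings of (1, 1, cross_attention_dim) and a (1, 3, 8 * sample_size,
    # 8 * sample_size) conditioning image, enough to materialize every parameter once.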
def lowerCAmelCase__ ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.block_out_channels
_UpperCAmelCase : List[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCAmelCase : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
_UpperCAmelCase : Optional[Any] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCAmelCase : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCAmelCase : Optional[Any] = FlaxTimestepEmbedding(lowerCamelCase__ , dtype=self.dtype )
_UpperCAmelCase : Union[str, Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
_UpperCAmelCase : int = self.only_cross_attention
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : str = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[int] = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCAmelCase : Dict = []
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = block_out_channels[0]
_UpperCAmelCase : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCAmelCase : Dict = output_channel
_UpperCAmelCase : Optional[Any] = block_out_channels[i]
_UpperCAmelCase : int = i == len(lowerCamelCase__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCAmelCase : Union[str, Any] = FlaxCrossAttnDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
_UpperCAmelCase : Union[str, Any] = FlaxDownBlockaD(
in_channels=lowerCamelCase__ , out_channels=lowerCamelCase__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCamelCase__ )
for _ in range(self.layers_per_block ):
_UpperCAmelCase : Dict = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
if not is_final_block:
_UpperCAmelCase : List[Any] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCamelCase__ )
_UpperCAmelCase : List[str] = down_blocks
_UpperCAmelCase : List[Any] = controlnet_down_blocks
# mid
_UpperCAmelCase : Tuple = block_out_channels[-1]
_UpperCAmelCase : str = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCamelCase__ , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
_UpperCAmelCase : Optional[int] = nn.Conv(
lowerCamelCase__ , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : float = 1.0 , lowerCamelCase__ : bool = True , lowerCamelCase__ : bool = False , ) ->Union[FlaxControlNetOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_UpperCAmelCase : Optional[Any] = jnp.flip(lowerCamelCase__ , axis=1 )
# 1. time
if not isinstance(lowerCamelCase__ , jnp.ndarray ):
_UpperCAmelCase : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCamelCase__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCAmelCase : List[str] = timesteps.astype(dtype=jnp.floataa )
_UpperCAmelCase : Optional[int] = jnp.expand_dims(lowerCamelCase__ , 0 )
_UpperCAmelCase : Any = self.time_proj(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = self.time_embedding(lowerCamelCase__ )
# 2. pre-process
_UpperCAmelCase : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
_UpperCAmelCase : Dict = self.conv_in(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = jnp.transpose(lowerCamelCase__ , (0, 2, 3, 1) )
_UpperCAmelCase : Optional[Any] = self.controlnet_cond_embedding(lowerCamelCase__ )
sample += controlnet_cond
# 3. down
_UpperCAmelCase : int = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase , _UpperCAmelCase : str = down_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
else:
_UpperCAmelCase , _UpperCAmelCase : List[str] = down_block(lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
_UpperCAmelCase : Optional[int] = self.mid_block(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , deterministic=not train )
        # 5. controlnet blocks
_UpperCAmelCase : List[str] = ()
for down_block_res_sample, controlnet_block in zip(lowerCamelCase__ , self.controlnet_down_blocks ):
_UpperCAmelCase : Dict = controlnet_block(lowerCamelCase__ )
controlnet_down_block_res_samples += (down_block_res_sample,)
_UpperCAmelCase : Dict = controlnet_down_block_res_samples
_UpperCAmelCase : Dict = self.controlnet_mid_block(lowerCamelCase__ )
# 6. scaling
_UpperCAmelCase : int = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCamelCase__ , mid_block_res_sample=lowerCamelCase__ )
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
lowerCamelCase__ = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[Any]=None , lowerCamelCase__ : int=1 ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = tokenizer
_UpperCAmelCase : Tuple = dataset
_UpperCAmelCase : Union[str, Any] = len(lowerCamelCase__ ) if n_tasks is None else n_tasks
_UpperCAmelCase : Any = n_copies
def __iter__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip() )
_UpperCAmelCase : Optional[Any] = self.tokenizer(lowerCamelCase__ , padding=lowerCamelCase__ , return_tensors="pt" )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
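# Scheduling note on the dataset above: each task's prompt is yielded n_copies times,
# and each forward pass returns num_return_sequences completions, so a task accumulates
# roughly n_copies * batch_size (about args.n_samples) generations overall.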
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[str] = start_length
_UpperCAmelCase : Union[str, Any] = eof_strings
_UpperCAmelCase : Union[str, Any] = tokenizer
def __call__( self : Tuple , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_UpperCAmelCase : Optional[int] = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(lowerCamelCase__ )
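# In effect, generation stops once every sequence in the batch has emitted at least one
# EOF string (e.g. "\nclass" or "\ndef") after its prompt.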
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Tuple = re.split("(%s)" % "|".join(__lowerCAmelCase ) , __lowerCAmelCase )
# last string should be ""
return "".join(string_list[:-2] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=20 , **__lowerCAmelCase ):
_UpperCAmelCase : Tuple = defaultdict(__lowerCAmelCase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowerCAmelCase ) ):
with torch.no_grad():
_UpperCAmelCase : Tuple = batch["ids"].shape[-1]
_UpperCAmelCase : Optional[int] = accelerator.unwrap_model(__lowerCAmelCase ).generate(
input_ids=batch["ids"][:, : batch["input_len"]] , num_return_sequences=__lowerCAmelCase , **__lowerCAmelCase )
# each task is generated batch_size times
_UpperCAmelCase : str = batch["task_id"].repeat(__lowerCAmelCase )
_UpperCAmelCase : str = accelerator.pad_across_processes(
__lowerCAmelCase , dim=1 , pad_index=tokenizer.pad_token_id )
_UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((generated_tokens, generated_tasks) )
_UpperCAmelCase : Dict = generated_tokens.cpu().numpy()
_UpperCAmelCase : Dict = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowerCAmelCase , __lowerCAmelCase ):
gen_token_dict[task].append(__lowerCAmelCase )
_UpperCAmelCase : int = [[] for _ in range(__lowerCAmelCase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_UpperCAmelCase : List[Any] = tokenizer.decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , clean_up_tokenization_spaces=__lowerCAmelCase )
code_gens[task].append(remove_last_block(__lowerCAmelCase ) )
return code_gens
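# Shape of the result (illustrative): code_gens[task_id] is a list of cleaned
# completions for that HumanEval task, ready to be scored by the code_eval metric.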
def __lowerCAmelCase ():
# Setup configuration
_UpperCAmelCase : List[str] = HfArgumentParser(__lowerCAmelCase )
_UpperCAmelCase : Tuple = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_UpperCAmelCase : Any = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_UpperCAmelCase : List[str] = "false"
if args.num_workers is None:
_UpperCAmelCase : List[str] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_UpperCAmelCase : List[Any] = Accelerator()
set_seed(args.seed , device_specific=__lowerCAmelCase )
# Load model and tokenizer
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(args.model_ckpt )
_UpperCAmelCase : List[str] = tokenizer.eos_token
_UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_UpperCAmelCase : Tuple = {
"do_sample": args.do_sample,
"temperature": args.temperature,
"max_new_tokens": args.max_new_tokens,
"top_p": args.top_p,
"top_k": args.top_k,
"stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowerCAmelCase , __lowerCAmelCase )] ),
}
# Load evaluation dataset and metric
_UpperCAmelCase : Union[str, Any] = load_dataset("openai_humaneval" )
_UpperCAmelCase : List[Any] = load_metric("code_eval" )
_UpperCAmelCase : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval["test"] )
_UpperCAmelCase : Any = args.n_samples // args.batch_size
_UpperCAmelCase : Tuple = TokenizedDataset(__lowerCAmelCase , human_eval["test"] , n_copies=__lowerCAmelCase , n_tasks=__lowerCAmelCase )
# do not confuse args.batch_size, which is actually the num_return_sequences
_UpperCAmelCase : List[str] = DataLoader(__lowerCAmelCase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_UpperCAmelCase : Optional[int] = code_eval_metric.compute(references=[""] , predictions=[[""]] )
except ValueError as exception:
print(
"Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
" flag to enable code evaluation." )
raise exception
_UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Dict = complete_code(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , n_tasks=__lowerCAmelCase , batch_size=args.batch_size , **__lowerCAmelCase , )
if accelerator.is_main_process:
_UpperCAmelCase : List[Any] = []
for task in tqdm(range(__lowerCAmelCase ) ):
_UpperCAmelCase : str = human_eval["test"][task]["test"]
_UpperCAmelCase : Union[str, Any] = F"""check({human_eval['test'][task]['entry_point']})"""
references.append("\n" + test_func + "\n" + entry_point )
# Evaluate completions with "code_eval" metric
_UpperCAmelCase , _UpperCAmelCase : str = code_eval_metric.compute(
references=__lowerCAmelCase , predictions=__lowerCAmelCase , num_workers=args.num_workers )
print(F"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , "w" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
    # For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
lowerCamelCase__ = 'src/transformers'
lowerCamelCase__ = 'docs/source/en/tasks'
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
with open(__lowerCAmelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_UpperCAmelCase : Dict = f.readlines()
# Find the start prompt.
_UpperCAmelCase : Optional[Any] = 0
while not lines[start_index].startswith(__lowerCAmelCase ):
start_index += 1
start_index += 1
_UpperCAmelCase : Tuple = start_index
while not lines[end_index].startswith(__lowerCAmelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(TRANSFORMERS_PATH)
lowerCamelCase__ = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
lowerCamelCase__ = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = TASK_GUIDE_TO_MODELS[task_guide]
_UpperCAmelCase : Optional[Any] = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__lowerCAmelCase , set() )
_UpperCAmelCase : Optional[Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ):
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = _find_text_in_file(
filename=os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
_UpperCAmelCase : List[str] = get_model_list_for_task(__lowerCAmelCase )
if current_list != new_list:
if overwrite:
with open(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
" to fix this." )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
lowerCamelCase__ = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[Any] = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
        state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase , _UpperCAmelCase : Any = emb.weight.shape
    _UpperCAmelCase : List[Any] = nn.Linear(__lowerCAmelCase , __lowerCAmelCase , bias=__lowerCAmelCase )
_UpperCAmelCase : Tuple = emb.weight.data
return lin_layer
def __lowerCAmelCase (__lowerCAmelCase ):
    _UpperCAmelCase : List[Any] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Tuple = mam_aaa["args"] or mam_aaa["cfg"]["model"]
_UpperCAmelCase : Optional[Any] = mam_aaa["model"]
    remove_ignore_keys_(__lowerCAmelCase )
_UpperCAmelCase : List[str] = state_dict["encoder.embed_tokens.weight"].shape[0]
    _UpperCAmelCase : List[str] = MaMaaaConfig(
        vocab_size=__lowerCAmelCase , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
_UpperCAmelCase : Tuple = state_dict["decoder.embed_tokens.weight"]
    _UpperCAmelCase : str = MaMaaaForConditionalGeneration(__lowerCAmelCase )
    model.model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCamelCase__ = parser.parse_args()
    lowerCamelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None ):
if version.parse(hfh.__version__ ).release < version.parse("0.11.0" ).release:
# old versions of hfh don't url-encode the file path
_UpperCAmelCase : str = quote(__lowerCAmelCase )
return hfh.hf_hub_url(__lowerCAmelCase , __lowerCAmelCase , repo_type="dataset" , revision=__lowerCAmelCase )
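# Usage sketch (hypothetical arguments): hf_hub_url("user/my_dataset", "data/train file.csv")
# pre-quotes the path to "data/train%20file.csv" only on hfh < 0.11.0, where the client
# did not url-encode the file path itself; newer clients get the path passed through as-is.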
'''simple docstring'''
from math import pow, sqrt
def __lowerCAmelCase (*__lowerCAmelCase ):
    _UpperCAmelCase : str = len(__lowerCAmelCase ) > 0 and all(value > 0.0 for value in values )
return result
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(__lowerCAmelCase , __lowerCAmelCase )
        else ValueError("Input Error: Molar mass values must be greater than 0." )
)
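# Worked example (Graham's law, illustrative molar masses): for gases of molar mass 32
# and 4 the intended ratio is round(sqrt(32 / 4), 6) == 2.828427, i.e. the lighter gas
# effuses about 2.83x faster.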
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
        if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
        if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
        if validate(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
_UpperCAmelCase : Optional[int] = {layer["module"]: str(lowerCamelCase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
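    # Usage sketch (hypothetical backbone name; class is TimmBackbone in the
    # unobfuscated source): from_pretrained("resnet50", use_timm_backbone=True,
    # use_pretrained_backbone=True, out_indices=(1, 2, 3, 4)) folds the popped kwargs
    # above into a TimmBackboneConfig and defers to _from_config for construction.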
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _UpperCAmelCase : Optional[Any] = UniSpeechSatForSequenceClassification.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
_UpperCAmelCase : Optional[Any] = downstream_dict["projector.weight"]
_UpperCAmelCase : Tuple = downstream_dict["projector.bias"]
_UpperCAmelCase : Dict = downstream_dict["model.post_net.linear.weight"]
_UpperCAmelCase : Dict = downstream_dict["model.post_net.linear.bias"]
return model
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _UpperCAmelCase : Union[str, Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = downstream_dict["model.linear.weight"]
_UpperCAmelCase : Optional[Any] = downstream_dict["model.linear.bias"]
return model
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _UpperCAmelCase : List[Any] = UniSpeechSatForXVector.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
_UpperCAmelCase : str = downstream_dict["connector.weight"]
_UpperCAmelCase : Tuple = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
_UpperCAmelCase : Tuple = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
_UpperCAmelCase : List[str] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
_UpperCAmelCase : str = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
_UpperCAmelCase : int = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
_UpperCAmelCase : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
_UpperCAmelCase : Tuple = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
_UpperCAmelCase : List[Any] = downstream_dict["objective.W"]
return model
@torch.no_grad()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
    _UpperCAmelCase : List[str] = torch.load(__lowerCAmelCase , map_location="cpu" )
    _UpperCAmelCase : Union[str, Any] = checkpoint["Downstream"]
    _UpperCAmelCase : Tuple = UniSpeechSatConfig.from_pretrained(__lowerCAmelCase )
    _UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
        __lowerCAmelCase , return_attention_mask=__lowerCAmelCase , do_normalize=__lowerCAmelCase )
_UpperCAmelCase : Tuple = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
        _UpperCAmelCase : int = convert_classification(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
elif arch.endswith("ForAudioFrameClassification" ):
        _UpperCAmelCase : Optional[Any] = convert_diarization(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
elif arch.endswith("ForXVector" ):
        _UpperCAmelCase : Tuple = convert_xvector(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
_UpperCAmelCase : Tuple = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(__lowerCAmelCase )
    hf_model.save_pretrained(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowerCamelCase__ = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
'''simple docstring'''
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result, args):
_UpperCAmelCase : List[Any] = args.log_outputs
_UpperCAmelCase : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
_UpperCAmelCase : int = load_metric("wer" )
_UpperCAmelCase : int = load_metric("cer" )
# compute metrics
_UpperCAmelCase : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
_UpperCAmelCase : Optional[Any] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
_UpperCAmelCase : List[str] = F"""WER: {wer_result}\nCER: {cer_result}"""
print(__lowerCAmelCase )
with open(F"""{dataset_id}_eval_results.txt""" , "w" ) as f:
f.write(__lowerCAmelCase )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
_UpperCAmelCase : Optional[int] = F"""log_{dataset_id}_predictions.txt"""
_UpperCAmelCase : Optional[Any] = F"""log_{dataset_id}_targets.txt"""
with open(__lowerCAmelCase , "w" ) as p, open(__lowerCAmelCase , "w" ) as t:
# mapping function to write output
def write_to_file(__lowerCAmelCase , __lowerCAmelCase ):
p.write(F"""{i}""" + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F"""{i}""" + "\n" )
t.write(batch["target"] + "\n" )
result.map(__lowerCAmelCase , with_indices=__lowerCAmelCase )
def normalize_text(text):
_UpperCAmelCase : Union[str, Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
_UpperCAmelCase : int = re.sub(__lowerCAmelCase , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. by removing newline characters, etc.
    # note that order is important here!
    _UpperCAmelCase : Union[str, Any] = ["\n\n", "\n", "   ", "  "]
for t in token_sequences_to_ignore:
_UpperCAmelCase : Dict = " ".join(text.split(__lowerCAmelCase ) )
return text
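# Illustrative behaviour of `normalize_text` (the example string is made up for
# this note): the text is lowercased, characters in the ignore set are stripped,
# and newline runs are collapsed to single spaces, e.g.
#   normalize_text("Hello, World!\nGood day") -> "hello world good day"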
def main(args):
_UpperCAmelCase : Optional[Any] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=__lowerCAmelCase )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
_UpperCAmelCase : Union[str, Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
_UpperCAmelCase : Dict = feature_extractor.sampling_rate
# resample audio
_UpperCAmelCase : int = dataset.cast_column("audio" , Audio(sampling_rate=__lowerCAmelCase ) )
# load eval pipeline
if args.device is None:
_UpperCAmelCase : Union[str, Any] = 0 if torch.cuda.is_available() else -1
_UpperCAmelCase : Dict = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(__lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
_UpperCAmelCase : Union[str, Any] = prediction["text"]
_UpperCAmelCase : List[str] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
_UpperCAmelCase : Any = dataset.map(__lowerCAmelCase , remove_columns=dataset.column_names )
    # compute and log results
    # do not change the function below
log_results(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
    '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to None, in which case the audio is not chunked.'
)
parser.add_argument(
    '--stride_length_s', type=float, default=None, help='Stride of the audio chunks in seconds. Defaults to None.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
    help='The device to run the pipeline on. -1 for CPU, 0 for the first GPU and so on. Defaults to the first GPU if one is available, else CPU.',
)
args = parser.parse_args()
main(args)
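# Example invocation (model and dataset identifiers are illustrative only):
#   python eval.py --model_id hf-test/xls-r-300m-sv --dataset mozilla-foundation/common_voice_8_0 \
#       --config sv-SE --split test --log_outputs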
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    n = int(input("enter the numbers of values: "))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    for i in range(n):
        for j in range(n):
            y[i].append(j)
            y[i][j] = 0
    print("enter the values of parameters in a list: ")
    x = list(map(int, input().split()))
    print("enter the values of corresponding parameters: ")
    for i in range(n):
        y[i][0] = float(input())
    value = int(input("enter the value to interpolate: "))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f"the value at {value} is {summ}")
if __name__ == "__main__":
main()
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def bamb(x):
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self
    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def training_function(config, args):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
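# Example launch (the script name is illustrative):
#   accelerate launch peak_memory_usage.py --model_name_or_path bert-base-cased --num_epochs 1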
| 40
| 0
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
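# Worked example: for n = 10 the square of the sum is 55**2 = 3025 and the sum
# of the squares is 385, so solution(10) returns 2640.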
if __name__ == "__main__":
print(F'''{solution() = }''')
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
HIDEN_SIZE_MAPPING = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        # every weight except the language-model head lives under the `rwkv.` prefix
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
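# Illustrative key renames performed by `convert_state_dict` (example keys only):
#   "emb.weight"                -> "rwkv.embeddings.weight"
#   "blocks.0.att.key.weight"   -> "rwkv.blocks.0.attention.key.weight"
#   "blocks.2.ffn.value.weight" -> "rwkv.blocks.2.feed_forward.value.weight"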
def convert_rmkv_checkpoint_to_hf_format(repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
| 40
| 0
|
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
def _readaa(bytestream):
_UpperCAmelCase : Optional[int] = numpy.dtype(numpy.uintaa ).newbyteorder(">" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=lowerCamelCase_ )[0]
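# The MNIST IDX files store their header fields (magic number, item counts,
# image dimensions) as big-endian 32-bit unsigned integers, which is what the
# helper above reads.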
@deprecated(lowerCamelCase_ , "Please use tf.data to implement this functionality." )
def __lowerCAmelCase (__lowerCAmelCase ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
_UpperCAmelCase : Any = _readaa(lowerCamelCase_ )
if magic != 2_051:
raise ValueError(
"Invalid magic number %d in MNIST image file: %s" % (magic, f.name) )
_UpperCAmelCase : str = _readaa(lowerCamelCase_ )
_UpperCAmelCase : str = _readaa(lowerCamelCase_ )
_UpperCAmelCase : Union[str, Any] = _readaa(lowerCamelCase_ )
_UpperCAmelCase : str = bytestream.read(rows * cols * num_images )
_UpperCAmelCase : List[Any] = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
_UpperCAmelCase : Tuple = data.reshape(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , 1 )
return data
@deprecated(lowerCamelCase_ , "Please use tf.one_hot on tensors." )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = labels_dense.shape[0]
_UpperCAmelCase : Optional[int] = numpy.arange(lowerCamelCase_ ) * num_classes
_UpperCAmelCase : Optional[int] = numpy.zeros((num_labels, num_classes) )
_UpperCAmelCase : Union[str, Any] = 1
return labels_one_hot
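# Sketch of the intended behaviour (the upstream version also sets the hot
# entries with `labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1`):
# labels [1, 0] with num_classes=3 become [[0, 1, 0], [1, 0, 0]].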
@deprecated(lowerCamelCase_ , "Please use tf.data to implement this functionality." )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=10 ):
print("Extracting" , f.name )
with gzip.GzipFile(fileobj=lowerCamelCase_ ) as bytestream:
_UpperCAmelCase : int = _readaa(lowerCamelCase_ )
if magic != 2_049:
raise ValueError(
"Invalid magic number %d in MNIST label file: %s" % (magic, f.name) )
_UpperCAmelCase : Tuple = _readaa(lowerCamelCase_ )
_UpperCAmelCase : Optional[int] = bytestream.read(lowerCamelCase_ )
_UpperCAmelCase : Any = numpy.frombuffer(lowerCamelCase_ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(lowerCamelCase_ , lowerCamelCase_ )
return labels
class lowerCAmelCase__ :
@deprecated(
UpperCAmelCase_ , "Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models." , )
def __init__( self : Dict , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[str]=False , lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Tuple=dtypes.floataa , lowerCamelCase__ : int=True , lowerCamelCase__ : Union[str, Any]=None , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = random_seed.get_seed(UpperCAmelCase_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
_UpperCAmelCase : Optional[Any] = dtypes.as_dtype(UpperCAmelCase_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype )
if fake_data:
_UpperCAmelCase : Any = 1_00_00
_UpperCAmelCase : int = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F"""images.shape: {images.shape} labels.shape: {labels.shape}"""
_UpperCAmelCase : int = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
_UpperCAmelCase : Any = images.reshape(
images.shape[0] , images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
_UpperCAmelCase : Union[str, Any] = images.astype(numpy.floataa )
_UpperCAmelCase : Dict = numpy.multiply(UpperCAmelCase_ , 1.0 / 2_55.0 )
_UpperCAmelCase : Union[str, Any] = images
_UpperCAmelCase : List[str] = labels
_UpperCAmelCase : int = 0
_UpperCAmelCase : Any = 0
@property
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
return self._images
@property
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
return self._labels
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
return self._num_examples
@property
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
return self._epochs_completed
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int , lowerCamelCase__ : List[Any]=False , lowerCamelCase__ : Dict=True ) ->List[Any]:
'''simple docstring'''
if fake_data:
_UpperCAmelCase : List[Any] = [1] * 7_84
_UpperCAmelCase : Optional[int] = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(UpperCAmelCase_ )],
[fake_label for _ in range(UpperCAmelCase_ )],
)
_UpperCAmelCase : Tuple = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
_UpperCAmelCase : List[str] = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
_UpperCAmelCase : Optional[int] = self.images[perma]
_UpperCAmelCase : List[Any] = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
            # Get the remaining examples in this epoch
_UpperCAmelCase : List[Any] = self._num_examples - start
_UpperCAmelCase : int = self._images[start : self._num_examples]
_UpperCAmelCase : int = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
_UpperCAmelCase : Any = numpy.arange(self._num_examples )
numpy.random.shuffle(UpperCAmelCase_ )
_UpperCAmelCase : int = self.images[perm]
_UpperCAmelCase : str = self.labels[perm]
# Start next epoch
_UpperCAmelCase : Optional[int] = 0
_UpperCAmelCase : List[str] = batch_size - rest_num_examples
_UpperCAmelCase : List[Any] = self._index_in_epoch
_UpperCAmelCase : Any = self._images[start:end]
_UpperCAmelCase : Optional[int] = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
)
else:
self._index_in_epoch += batch_size
_UpperCAmelCase : List[Any] = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(lowerCamelCase_ , "Please write your own downloading logic." )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not gfile.Exists(lowerCamelCase_ ):
gfile.MakeDirs(lowerCamelCase_ )
_UpperCAmelCase : Optional[Any] = os.path.join(lowerCamelCase_ , lowerCamelCase_ )
if not gfile.Exists(lowerCamelCase_ ):
urllib.request.urlretrieve(lowerCamelCase_ , lowerCamelCase_ ) # noqa: S310
with gfile.GFile(lowerCamelCase_ ) as f:
_UpperCAmelCase : List[str] = f.size()
print("Successfully downloaded" , lowerCamelCase_ , lowerCamelCase_ , "bytes." )
return filepath
@deprecated(
    None, "Please use alternatives such as: tensorflow_datasets.load('mnist')" )
def read_data_sets(train_dir, fake_data=False, one_hot=False, dtype=dtypes.floataa, reshape=True, validation_size=5_000, seed=None, source_url=DEFAULT_SOURCE_URL, ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=lowerCamelCase_ , one_hot=lowerCamelCase_ , dtype=lowerCamelCase_ , seed=lowerCamelCase_ )
_UpperCAmelCase : Any = fake()
_UpperCAmelCase : Optional[int] = fake()
_UpperCAmelCase : int = fake()
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
if not source_url: # empty string check
_UpperCAmelCase : Any = DEFAULT_SOURCE_URL
_UpperCAmelCase : int = 'train-images-idx3-ubyte.gz'
_UpperCAmelCase : int = 'train-labels-idx1-ubyte.gz'
_UpperCAmelCase : Union[str, Any] = 't10k-images-idx3-ubyte.gz'
_UpperCAmelCase : Union[str, Any] = 't10k-labels-idx1-ubyte.gz'
_UpperCAmelCase : Any = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_images_file )
with gfile.Open(lowerCamelCase_ , "rb" ) as f:
_UpperCAmelCase : Any = _extract_images(lowerCamelCase_ )
_UpperCAmelCase : List[str] = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + train_labels_file )
with gfile.Open(lowerCamelCase_ , "rb" ) as f:
_UpperCAmelCase : int = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
_UpperCAmelCase : List[Any] = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_images_file )
with gfile.Open(lowerCamelCase_ , "rb" ) as f:
_UpperCAmelCase : str = _extract_images(lowerCamelCase_ )
_UpperCAmelCase : Tuple = _maybe_download(
lowerCamelCase_ , lowerCamelCase_ , source_url + test_labels_file )
with gfile.Open(lowerCamelCase_ , "rb" ) as f:
_UpperCAmelCase : Tuple = _extract_labels(lowerCamelCase_ , one_hot=lowerCamelCase_ )
if not 0 <= validation_size <= len(lowerCamelCase_ ):
_UpperCAmelCase : Tuple = (
'Validation size should be between 0 and '
F"""{len(lowerCamelCase_ )}. Received: {validation_size}."""
)
raise ValueError(lowerCamelCase_ )
_UpperCAmelCase : Dict = train_images[:validation_size]
_UpperCAmelCase : str = train_labels[:validation_size]
_UpperCAmelCase : List[str] = train_images[validation_size:]
_UpperCAmelCase : Any = train_labels[validation_size:]
_UpperCAmelCase : List[str] = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
_UpperCAmelCase : Any = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
_UpperCAmelCase : Optional[int] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
_UpperCAmelCase : Union[str, Any] = _DataSet(lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ )
return _Datasets(train=lowerCamelCase_ , validation=lowerCamelCase_ , test=lowerCamelCase_ )
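# Typical (deprecated) usage of the loader above, with an assumed data directory
# and method names taken from the upstream API:
#   mnist = read_data_sets("/tmp/mnist_data", one_hot=True)
#   images, labels = mnist.train.next_batch(100)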
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector):
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
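# `row - row % 3` and `column - column % 3` give the top-left cell of the 3x3
# box containing (row, column); the nested loop above scans that box for `n`.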
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    if location := find_empty_location(grid):
        row, column = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0
return None
def print_solution(grid: Matrix) -> None:
for row in grid:
for cell in row:
print(__lowerCAmelCase , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class EMAModel:
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
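    # Sketch of the schedule above: with use_ema_warmup, inv_gamma=1.0 and
    # power=2/3, the decay after one step is 1 - (1 + 1) ** (-2 / 3) ≈ 0.37 and
    # rises toward `self.decay`; without warmup it follows (1 + step) / (10 + step).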
    @torch.no_grad()
    def step(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        '''simple docstring'''
        if isinstance(parameters, torch.nn.Module):
            deprecation_message = (
                "Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
                "Please pass the parameters of the module instead."
            )
            deprecate(
                "passing a `torch.nn.Module` to `ExponentialMovingAverage.step`", "1.0.0", deprecation_message, standard_warn=False, )
            parameters = parameters.parameters()
        parameters = list(parameters)
        self.optimization_step += 1
        # Compute the decay factor for the exponential moving average.
        decay = self.get_decay(self.optimization_step)
        self.cur_decay_value = decay
        one_minus_decay = 1 - decay
        context_manager = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
            import deepspeed
        for s_param, param in zip(self.shadow_params, parameters):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
                context_manager = deepspeed.zero.GatheredParameters(param, modifier_rank=None)
            with context_manager():
                if param.requires_grad:
                    s_param.sub_(one_minus_decay * (s_param - param))
                else:
                    s_param.copy_(param)
    def copy_to(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        '''simple docstring'''
        parameters = list(parameters)
        for s_param, param in zip(self.shadow_params, parameters):
            param.data.copy_(s_param.to(param.device).data)
    def to(self, device=None, dtype=None) -> None:
        '''simple docstring'''
        self.shadow_params = [
            p.to(device=device, dtype=dtype) if p.is_floating_point() else p.to(device=device)
            for p in self.shadow_params
        ]
    def state_dict(self) -> dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
    def store(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        '''simple docstring'''
        self.temp_stored_params = [param.detach().cpu().clone() for param in parameters]
    def restore(self, parameters: Iterable[torch.nn.Parameter]) -> None:
        '''simple docstring'''
        if self.temp_stored_params is None:
            raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`")
        for c_param, param in zip(self.temp_stored_params, parameters):
            param.data.copy_(c_param.data)
        # Better memory-wise.
        self.temp_stored_params = None
    def load_state_dict(self, state_dict: dict) -> None:
        '''simple docstring'''
        state_dict = copy.deepcopy(state_dict)
        self.decay = state_dict.get("decay", self.decay)
        if self.decay < 0.0 or self.decay > 1.0:
            raise ValueError("Decay must be between 0 and 1")
        self.min_decay = state_dict.get("min_decay", self.min_decay)
        if not isinstance(self.min_decay, float):
            raise ValueError("Invalid min_decay")
        self.optimization_step = state_dict.get("optimization_step", self.optimization_step)
        if not isinstance(self.optimization_step, int):
            raise ValueError("Invalid optimization_step")
        self.update_after_step = state_dict.get("update_after_step", self.update_after_step)
        if not isinstance(self.update_after_step, int):
            raise ValueError("Invalid update_after_step")
        self.use_ema_warmup = state_dict.get("use_ema_warmup", self.use_ema_warmup)
        if not isinstance(self.use_ema_warmup, bool):
            raise ValueError("Invalid use_ema_warmup")
        self.inv_gamma = state_dict.get("inv_gamma", self.inv_gamma)
        if not isinstance(self.inv_gamma, (float, int)):
            raise ValueError("Invalid inv_gamma")
        self.power = state_dict.get("power", self.power)
        if not isinstance(self.power, (float, int)):
            raise ValueError("Invalid power")
        shadow_params = state_dict.get("shadow_params", None)
        if shadow_params is not None:
            self.shadow_params = shadow_params
            if not isinstance(self.shadow_params, list):
                raise ValueError("shadow_params must be a list")
            if not all(isinstance(p, torch.Tensor) for p in self.shadow_params):
                raise ValueError("shadow_params must all be Tensors")
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert'] = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_rembert_fast'] = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_rembert'] = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_rembert'] = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
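# Editorial addition: a self-contained sketch of the lazy-import pattern used
# above. `DemoLazyModule` is a simplified, hypothetical stand-in for the real
# `_LazyModule`: each exported attribute is resolved by importing its owning
# submodule only on first access.
if __name__ == "__main__":
    import importlib
    import types

    class DemoLazyModule(types.ModuleType):
        def __init__(self, name, import_structure):
            super().__init__(name)
            # map every exported attribute to the submodule that defines it
            self._attr_to_submodule = {
                attr: submodule
                for submodule, attrs in import_structure.items()
                for attr in attrs
            }

        def __getattr__(self, attr):
            # import the owning submodule lazily, then resolve the attribute
            submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
            return getattr(submodule, attr)

    demo = DemoLazyModule("json", {"decoder": ["JSONDecoder"]})
    print(demo.JSONDecoder)  # imports json.decoder only at this point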
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
    args = parser.parse_args()
    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
    state_dict = model.state_dict()
    compressed_sd = {}
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1
    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
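    # Editorial addition: a hedged sketch of how the dump is typically consumed
    # downstream (the 6-layer student config here is an assumption, not part of
    # this script), kept commented so the script's behavior is unchanged:
    #
    #     from transformers import DistilBertConfig, DistilBertForMaskedLM
    #     student = DistilBertForMaskedLM(DistilBertConfig(n_layers=6))
    #     student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)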
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num, den):
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len):
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions
def solution(n=2):
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
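# Editorial addition: a quick worked check of the functions above. 49/98 equals
# 4/8 after naively cancelling the shared digit 9, and the four non-trivial
# curious fractions are 16/64, 19/95, 26/65 and 49/98, whose product in lowest
# terms has denominator 100 (Project Euler problem 33). Kept commented:
#
#     assert is_digit_cancelling(49, 98)
#     assert sorted(fraction_list(2)) == ["16/64", "19/95", "26/65", "49/98"]
#     assert solution() == 100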
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
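# Editorial addition: the last call above raises ValueError by design, since
# "Foo" is not in the graph. Also note list.pop(0) is O(n), so this queue makes
# BFS O(V^2) in the worst case; a hedged sketch of the standard O(V + E)
# variant (not part of the original file) swaps in collections.deque:
#
#     from collections import deque
#     queue = deque([self.source_vertex])
#     while queue:
#         vertex = queue.popleft()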
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import tensorflow as tf
from .utils import logging
logger = logging.get_logger(__name__)
def shape_list(tensor):
    if isinstance(tensor, np.ndarray):
        return list(tensor.shape)
    dynamic = tf.shape(tensor)
    if tensor.shape == tf.TensorShape(None):
        return dynamic
    static = tensor.shape.as_list()
    return [dynamic[i] if s is None else s for i, s in enumerate(static)]
def stable_softmax(logits, axis=None, name=None):
    return tf.nn.softmax(logits=logits + 1e-9, axis=axis, name=name)
def functional_layernorm(inputs, weight, bias, epsilon=1e-5, axis=-1):
    if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(axis, int):
        raise NotImplementedError("Only 1D weight and bias tensors are supported for now, with only a single axis.")
    # Get mean and variance on the axis to be normalized
    mean, variance = tf.nn.moments(inputs, axes=[axis], keepdims=True)
    if axis != -1:
        # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions
        # on every dimension except axis
        shape = [1] * inputs.shape.rank
        shape[axis] = shape_list(inputs)[axis]
        weight = tf.reshape(weight, shape)
        bias = tf.reshape(bias, shape)
    # Compute layer normalization using the batch_normalization
    # function.
    outputs = tf.nn.batch_normalization(
        inputs, mean, variance, offset=bias, scale=weight, variance_epsilon=epsilon, )
    return outputs
def flatten(input, start_dim=0, end_dim=-1):
    # Replicates the behavior of torch.flatten in TF
    if end_dim < 0:
        end_dim += input.shape.rank
    if start_dim < 0:
        start_dim += input.shape.rank
    if start_dim == end_dim:
        return input
    in_shape = tf.shape(input)
    flattened_dim = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1])
    out_shape = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]], axis=0)
    return tf.reshape(input, out_shape)
def invert_attention_mask(encoder_attention_mask):
    if not isinstance(encoder_attention_mask, tf.Tensor):
        encoder_attention_mask = tf.convert_to_tensor(encoder_attention_mask)  # Catches stray NumPy inputs
    if encoder_attention_mask.shape.rank == 3:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
    if encoder_attention_mask.shape.rank == 2:
        encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
    # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
    # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
    # /transformer/transformer_layers.py#L270
    # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
    # encoder_extended_attention_mask.transpose(-1, -2))
    encoder_extended_attention_mask = (
        tf.cast(1, encoder_attention_mask.dtype) - encoder_extended_attention_mask
    ) * encoder_extended_attention_mask.dtype.min
    return encoder_extended_attention_mask
def check_embeddings_within_bounds(tensor, embed_dim, tensor_name="input_ids"):
    tf.debugging.assert_less(
        tensor, tf.cast(embed_dim, dtype=tensor.dtype), message=(
            f"The maximum value of {tensor_name} ({tf.math.reduce_max(tensor)}) must be smaller than the embedding "
            f"layer's input dimension ({embed_dim}). The likely cause is some problem at tokenization time."
        ), )
def save_attributes_to_hdf5_group(group, name, data):
    HDF5_OBJECT_HEADER_LIMIT = 64_512
    # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT`
    # because in that case even chunking the array would not make the saving
    # possible.
    bad_attributes = [x for x in data if len(x) > HDF5_OBJECT_HEADER_LIMIT]
    # Expecting this to never be true.
    if bad_attributes:
        raise RuntimeError(
            "The following attributes cannot be saved to HDF5 file because "
            f"they are larger than {HDF5_OBJECT_HEADER_LIMIT} "
            f"bytes: {bad_attributes}")
    data_npy = np.asarray(data)
    num_chunks = 1
    chunked_data = np.array_split(data_npy, num_chunks)
    # This will never loop forever thanks to the test above.
    while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data):
        num_chunks += 1
        chunked_data = np.array_split(data_npy, num_chunks)
    if num_chunks > 1:
        for chunk_id, chunk_data in enumerate(chunked_data):
            group.attrs["%s%d" % (name, chunk_id)] = chunk_data
    else:
        group.attrs[name] = data
def load_attributes_from_hdf5_group(group, name):
    if name in group.attrs:
        data = [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs[name]]
    else:
        data = []
        chunk_id = 0
        while "%s%d" % (name, chunk_id) in group.attrs:
            data.extend(
                [n.decode("utf8") if hasattr(n, "decode") else n for n in group.attrs["%s%d" % (name, chunk_id)]])
            chunk_id += 1
    return data
def expand_1d(data):
    def _expand_single_1d_tensor(t):
        if isinstance(t, tf.Tensor) and t.shape.rank == 1:
            return tf.expand_dims(t, axis=-1)
        return t
    return tf.nest.map_structure(_expand_single_1d_tensor, data)
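# Editorial addition: a small, hedged demo of shape_list, which mixes static and
# dynamic dimensions so unknown axes stay usable inside tf.function graphs.
# Requires tensorflow; the shapes below are illustrative only.
if __name__ == "__main__":
    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 768], dtype=tf.float32)])
    def _show(x):
        dims = shape_list(x)  # [dynamic batch-size tensor, 768]
        tf.print(dims[0])     # prints the runtime batch size
        return x
    _show(tf.zeros((2, 768)))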
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor, tokenizer):
        '''simple docstring'''
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        '''simple docstring'''
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)
    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        '''simple docstring'''
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
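# Editorial addition: a hedged usage sketch for the processor above. The
# checkpoint name and image path are illustrative; running this needs network
# access and the corresponding weights, so it is left commented out.
#
#     from PIL import Image
#     from transformers import AutoProcessor
#     processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")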
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
def is_digit_cancelling(num, den):
    '''simple docstring'''
    return (
        num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
    )
def fraction_list(digit_len):
    '''simple docstring'''
    solutions = []
    den = 11
    last_digit = int("1" + "0" * digit_len)
    for num in range(den, last_digit):
        while den <= 99:
            if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
                if is_digit_cancelling(num, den):
                    solutions.append(f"{num}/{den}")
            den += 1
        den = 10
    return solutions
def solution(n=2):
    '''simple docstring'''
    result = 1.0
    for fraction in fraction_list(n):
        frac = Fraction(fraction)
        result *= frac.denominator / frac.numerator
    return int(result)
if __name__ == "__main__":
print(solution())
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
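# Editorial addition: a worked check for the sample graph above. Removing
# vertex 3 isolates 4; removing 5 cuts off {6, 7, 8}; removing 2 splits the
# triangle {0, 1, 2} from the rest - so compute_ap(data) should print the
# articulation points 2, 3 and 5, one per line.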
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE
from transformers.models.speechta import SpeechTaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe_char.model')
@require_sentencepiece
@require_tokenizers
class SpeechTaTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechTaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = SpeechTaTokenizer(SAMPLE_VOCAB)
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        '''simple docstring'''
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_UpperCAmelCase : int = tokenizer.vocab_size
_UpperCAmelCase : str = len(_a )
self.assertNotEqual(_a , 0 )
# We usually have added tokens from the start in tests because our vocab fixtures are
# smaller than the original vocabs - let's not assert this
# self.assertEqual(vocab_size, all_size)
_UpperCAmelCase : List[str] = ["""aaaaa bbbbbb""", """cccccccccdddddddd"""]
_UpperCAmelCase : List[str] = tokenizer.add_tokens(_a )
_UpperCAmelCase : List[str] = tokenizer.vocab_size
_UpperCAmelCase : Tuple = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size + len(_a ) )
_UpperCAmelCase : Optional[Any] = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 4 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
_UpperCAmelCase : Tuple = {"""eos_token""": """>>>>|||<||<<|<<""", """pad_token""": """<<<<<|||>|>>>>|>"""}
_UpperCAmelCase : Union[str, Any] = tokenizer.add_special_tokens(_a )
_UpperCAmelCase : Optional[Any] = tokenizer.vocab_size
_UpperCAmelCase : Tuple = len(_a )
self.assertNotEqual(_a , 0 )
self.assertEqual(_a , _a )
self.assertEqual(_a , len(_a ) )
self.assertEqual(_a , all_size_a + len(_a ) )
_UpperCAmelCase : List[Any] = tokenizer.encode(
">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l" , add_special_tokens=_a )
self.assertGreaterEqual(len(_a ) , 6 )
self.assertGreater(tokens[0] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[0] , tokens[1] )
self.assertGreater(tokens[-3] , tokenizer.vocab_size - 1 )
self.assertGreater(tokens[-3] , tokens[-4] )
self.assertEqual(tokens[0] , tokenizer.eos_token_id )
self.assertEqual(tokens[-3] , tokenizer.pad_token_id )
def lowerCAmelCase__ ( self : List[Any] ) ->List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.get_tokenizer()
_UpperCAmelCase : str = tokenizer.tokenize("This is a test" )
# fmt: off
self.assertListEqual(_a , [SPIECE_UNDERLINE, "T", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "a", SPIECE_UNDERLINE, "t", "e", "s", "t"] )
# fmt: on
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6] , )
_UpperCAmelCase : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "92000", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
_UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(_a )
# fmt: off
self.assertListEqual(_a , [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26] )
# fmt: on
_UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [SPIECE_UNDERLINE, "I", SPIECE_UNDERLINE, "w", "a", "s", SPIECE_UNDERLINE, "b", "o", "r", "n", SPIECE_UNDERLINE, "i", "n", SPIECE_UNDERLINE, "<unk>", ",", SPIECE_UNDERLINE, "a", "n", "d", SPIECE_UNDERLINE, "t", "h", "i", "s", SPIECE_UNDERLINE, "i", "s", SPIECE_UNDERLINE, "f", "a", "l", "s", "é", "."] )
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = [
"""Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides """
"""general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural """
"""Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained """
"""models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.""",
"""BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly """
"""conditioning on both left and right context in all layers.""",
"""The quick brown fox jumps over the lazy dog.""",
]
# fmt: off
_UpperCAmelCase : int = {
"""input_ids""": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
"""attention_mask""": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name="microsoft/speecht5_asr" , revision="c5ef64c71905caeccde0e4462ef3f9077224c524" , sequences=_a , )
'''simple docstring'''
def solution():
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
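# Editorial addition: this is Project Euler problem 48 (self powers); the last
# ten digits of 1**1 + 2**2 + ... + 1000**1000 are 9110846700, so the script
# should print "9110846700".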
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = 'linear'
    COSINE = 'cosine'
    COSINE_WITH_RESTARTS = 'cosine_with_restarts'
    POLYNOMIAL = 'polynomial'
    CONSTANT = 'constant'
    CONSTANT_WITH_WARMUP = 'constant_with_warmup'
    PIECEWISE_CONSTANT = 'piecewise_constant'
def get_constant_schedule(optimizer, last_epoch=-1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        steps_str, value_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])
    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple
        return rule_func
    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})")
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init
    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(name, optimizer, step_rules=None, num_warmup_steps=None, num_training_steps=None, num_cycles=1, power=1.0, last_epoch=-1, ):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)
    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)
    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")
    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)
    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")
    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, num_cycles=num_cycles, last_epoch=last_epoch, )
    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, power=power, last_epoch=last_epoch, )
    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch)
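# Editorial addition: a minimal, hedged usage sketch for the factory above. The
# toy model and step counts are illustrative only.
if __name__ == "__main__":
    import torch
    _model = torch.nn.Linear(4, 4)
    _opt = torch.optim.AdamW(_model.parameters(), lr=1e-3)
    _sched = get_scheduler("cosine", _opt, num_warmup_steps=10, num_training_steps=100)
    for _ in range(100):
        _opt.step()
        _sched.step()
    print(_sched.get_last_lr())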
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a, input_b):
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset, value_array):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)
    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a, input_b):
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
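# Editorial addition: a small worked example for the helpers above (arrays are
# illustrative; kept commented so the doctest run is unchanged):
#
#     dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
#     value_array = np.array([[0.0, 1.0]])
#     similarity_search(dataset, value_array)  # -> [[[0.0, 0.0], 1.0]]
#     cosine_similarity(np.array([1.0, 1.0]), np.array([1.0, 0.0]))  # ~0.7071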
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []
no_error_payload = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
payload = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': F'''🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results''',
'emoji': True,
},
}
]
total_num_failed = 0
for log in Path().glob('*.log'):
    section_num_failed = 0
    with open(log, 'r') as f:
        for line in f:
            line = json.loads(line)
            if line.get('nodeid', '') != "":
                test = line['nodeid']
                if line.get('duration', None) is not None:
                    duration = f'{line["duration"]:.4f}'
                    if line.get('outcome', '') == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split('_')[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()
message = ''
all_filesafailed = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
        failed_table = []
        filesafailed = {}
        for test in failed_tests:
            data = test[0].split('::')
            data[0] = data[0].split('/')[-1]
            if data[0] not in filesafailed:
                filesafailed[data[0]] = [data[1:]]
            else:
                filesafailed[data[0]] += [data[1:]]
            failed_table.append(data)
        files = [test[0] for test in failed_table]
        individual_files = list(set(files))
        # Count number of instances in failed_tests
        table = []
        for file in individual_files:
            table.append([file, len(filesafailed[file])])
        failed_table = tabulate(
            table,
            headers=['Test Location', 'Num Failed'],
            tablefmt=hf_table_format,
            stralign='right',
        )
        message += f"\n```\n{failed_table}\n```"
        all_filesafailed.append(filesafailed)
    if len(message) > 3_000:
        err = 'Too many failed tests, please see the full report in the Action results.'
        offset = len(err) + 10
        message = message[: 3_000 - offset] + f'\n...\n```\n{err}'
print(F'''### {message}''')
else:
    message = 'No failed tests! 🤗'
print(F'''## {message}''')
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
        md_report = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
        action_button = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': F'''https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}''',
},
}
payload.append(action_button)
        date_report = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': F'''Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}''',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
        ts = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
            test_class = ''
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ''
            payload = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': F'''Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```''',
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
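# Editorial addition: a hedged sketch of what hf_table_format renders - pipe-
# separated rows with no horizontal rules, which read cleanly inside Slack code
# blocks. The row below is illustrative; kept commented out.
#
#     print(tabulate([['tests/test_foo.py', 2]], headers=['Test Location', 'Num Failed'],
#                    tablefmt=hf_table_format, stralign='right'))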
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
REFERENCE_CODE = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
    def setUp(self):
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"), os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"), )
    def tearDown(self):
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        '''simple docstring'''
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        '''simple docstring'''
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE + "\n", )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput", "DDPMSchedulerOutput", REFERENCE_CODE, )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", re.sub("DDPM", "Test", REFERENCE_CODE), )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}", f"{long_class_name}SchedulerOutput", re.sub("Bert", long_class_name, REFERENCE_CODE), )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test", "TestSchedulerOutput", REFERENCE_CODE, overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE), )
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
},
"merges_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
},
"tokenizer_file": {
"gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
"gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
"gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
"gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
"distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"gpt2": 1_024,
"gpt2-medium": 1_024,
"gpt2-large": 1_024,
"gpt2-xl": 1_024,
"distilgpt2": 1_024,
}
class GPT2TokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPT2Tokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        self.add_bos_token = kwargs.pop("add_bos_token", False)
        # align the backend pre-tokenizer's add_prefix_space with the requested value
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
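# Usage sketch (illustrative, not part of the original module; run from user code,
# since this module itself uses relative imports). "gpt2" is the stock hub id:
# from transformers import GPT2TokenizerFast
# tok = GPT2TokenizerFast.from_pretrained("gpt2")
# ids = tok("Hello world")["input_ids"]
# print(tok.decode(ids))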
| 715
|
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        # drop trailing zero dual components
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
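    # Extra illustrative check (not in the original file): the first derivative of
    # y**3 at y = 2 via dual numbers is 3 * 2**2 = 12.
    print(differentiate(lambda y: y**3, 2, 1))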
| 40
| 0
|
'''simple docstring'''
from math import factorial
def binomial_distribution(successes, trials, prob):
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trials")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
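    # Illustrative sanity check (not in the original file): the probabilities over
    # all possible success counts must sum to 1.
    print("sum over k=0..4:", sum(binomial_distribution(k, 4, 0.75) for k in range(5)))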
| 716
|
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| 40
| 0
|
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
debug_launcher(test_script.main )
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
debug_launcher(test_ops.main )
| 717
|
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
def __init__( self : int , lowerCamelCase__ : str , lowerCamelCase__ : str=13 , lowerCamelCase__ : Dict=7 , lowerCamelCase__ : str=True , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Dict=True , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=99 , lowerCamelCase__ : Optional[int]=32 , lowerCamelCase__ : str=5 , lowerCamelCase__ : List[Any]=4 , lowerCamelCase__ : Any=37 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=5_12 , lowerCamelCase__ : Any=16 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Optional[int]=4 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = parent
_UpperCAmelCase : Optional[int] = batch_size
_UpperCAmelCase : List[Any] = seq_length
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : int = use_attention_mask
_UpperCAmelCase : List[Any] = use_token_type_ids
_UpperCAmelCase : int = use_labels
_UpperCAmelCase : str = vocab_size
_UpperCAmelCase : Tuple = hidden_size
_UpperCAmelCase : Dict = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Tuple = intermediate_size
_UpperCAmelCase : List[Any] = hidden_act
_UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
_UpperCAmelCase : List[str] = attention_probs_dropout_prob
_UpperCAmelCase : Optional[int] = max_position_embeddings
_UpperCAmelCase : Tuple = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : List[str] = initializer_range
_UpperCAmelCase : Union[str, Any] = num_choices
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Any = None
if self.use_attention_mask:
_UpperCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : int = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = config_and_inputs
_UpperCAmelCase : Optional[Any] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = config_and_inputs
_UpperCAmelCase : List[Any] = True
_UpperCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
lowerCAmelCase : Tuple = True
lowerCAmelCase : Tuple = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase : Any = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
_UpperCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
        _UpperCAmelCase : str = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )[0]
_UpperCAmelCase : int = [1, 11, 5_02_65]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
_UpperCAmelCase : int = np.array(
            [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=lowerCamelCase__ )
        _UpperCAmelCase : List[Any] = np.array([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] , dtype=jnp.int32 )
_UpperCAmelCase : Optional[Any] = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
_UpperCAmelCase : str = np.array(
            [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.float32 )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 40
| 0
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Must be tested with an actual model -- the dummy models' tokenizers are not aligned with
        # their models, and `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
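# Usage sketch (illustrative, not part of the test file): consuming a
# TextIteratorStreamer from the main thread while generation runs in the background,
# mirroring the tests above. `ids_tensor` comes from the test utilities imported at
# the top, so this only runs in the test-suite context; the checkpoint is the same
# tiny test model.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size)
    streamer = TextIteratorStreamer(tokenizer)
    generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
    Thread(target=model.generate, kwargs=generation_kwargs).start()
    for new_text in streamer:
        print(new_text, end="", flush=True)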
| 718
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
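# Minimal sketch (illustrative and heavily simplified; not the real _LazyModule) of
# the deferred-import pattern used above: attribute access on the module triggers
# the actual submodule import, so torch-dependent code loads only when needed.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_submodule = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        submodule = importlib.import_module("." + self._attr_to_submodule[attr], self.__name__)
        return getattr(submodule, attr)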
| 40
| 0
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
trans = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def preprocess(image):
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    image = [trans(img.convert("RGB")) for img in image]
    image = torch.stack(image)
    return image
class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    def check_inputs(self, strength):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

    def get_timesteps(self, num_inference_steps, strength, device):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )
        init_latents = image.to(device=device, dtype=dtype)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        # get latents
        print("add noise to latents at timestep", timestep)
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents
        return latents

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.FloatTensor, PIL.Image.Image] = None,
        strength: float = 0.8,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # 1. Check inputs
        self.check_inputs(strength)
        # 2. Preprocess image
        image = preprocess(image)
        # 3. set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, self.device)
        latent_timestep = timesteps[:1].repeat(batch_size)
        # 4. Prepare latent variables
        latents = self.prepare_latents(image, latent_timestep, batch_size, self.unet.dtype, self.device, generator)
        image = latents
        # 5. Denoising loop
        for t in self.progress_bar(timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator,
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image, latent_timestep.item())
        return ImagePipelineOutput(images=image)
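# Usage sketch (illustrative): wire the pipeline to a pretrained unconditional DDPM
# checkpoint. "google/ddpm-celebahq-256", the subfolder layout, and "input.png" are
# assumptions for the example, not part of this file.
if __name__ == "__main__":
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel.from_pretrained("google/ddpm-celebahq-256", subfolder="unet")
    scheduler = DDPMScheduler.from_pretrained("google/ddpm-celebahq-256", subfolder="scheduler")
    pipe = DDIMNoiseComparativeAnalysisPipeline(unet=unet, scheduler=scheduler)
    init_image = PIL.Image.open("input.png")  # any RGB image; `trans` resizes it to 256x256
    images, latent_timestep = pipe(image=init_image, strength=0.5, num_inference_steps=50, return_dict=False)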
| 719
|
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
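# Illustrative variant (not in the original file): the same "first ten digits of a
# large sum" idea on inline data, so the logic can be checked without num.txt.
def solution_from_numbers(numbers):
    return str(sum(numbers))[:10]


if __name__ == "__main__":
    print(solution_from_numbers([10**49, 2 * 10**49, 3 * 10**49]))  # -> "6000000000"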
| 40
| 0
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self : int , lowerCamelCase__ : int , lowerCamelCase__ : Dict=2 , lowerCamelCase__ : int=True , lowerCamelCase__ : Tuple=False , lowerCamelCase__ : Any=10 , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : Optional[Any]=32 * 4 , lowerCamelCase__ : int=32 * 6 , lowerCamelCase__ : Union[str, Any]=4 , lowerCamelCase__ : List[str]=32 , ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Any = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : List[Any] = use_auxiliary_loss
_UpperCAmelCase : List[Any] = num_queries
_UpperCAmelCase : List[str] = num_channels
_UpperCAmelCase : List[str] = min_size
_UpperCAmelCase : str = max_size
_UpperCAmelCase : Tuple = num_labels
_UpperCAmelCase : Optional[int] = mask_feature_size
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
snake_case_ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ )
_UpperCAmelCase : Optional[Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5
).float()
_UpperCAmelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long()
_UpperCAmelCase : List[str] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_28 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_UpperCAmelCase : Tuple = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : Dict , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : Any = output.pixel_decoder_hidden_states
_UpperCAmelCase : Union[str, Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Dict=False ) ->Optional[Any]:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : int = MaskFormerModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_UpperCAmelCase : str = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase : Dict = model(snake_case_ , output_hidden_states=snake_case_ )
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(snake_case_ , snake_case_ )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
_UpperCAmelCase : Any = MaskFormerForInstanceSegmentation(config=snake_case_ )
model.to(snake_case_ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Optional[Any] = model(pixel_values=snake_case_ , pixel_mask=snake_case_ )
_UpperCAmelCase : int = model(snake_case_ )
comm_check_on_output(snake_case_ )
_UpperCAmelCase : str = model(
pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
comm_check_on_output(snake_case_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase : Union[str, Any] = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
lowerCAmelCase : List[Any] = (
{"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : int = False
lowerCAmelCase : List[Any] = False
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskFormerModelTester(self )
_UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ )
@unittest.skip(reason="MaskFormer does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(snake_case_ )
_UpperCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Dict:
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_UpperCAmelCase : List[str] = MaskFormerModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Tuple = (self.model_tester.min_size,) * 2
_UpperCAmelCase : List[str] = {
"pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ),
"mask_labels": torch.randn((2, 10, *size) , device=snake_case_ ),
"class_labels": torch.zeros(2 , 10 , device=snake_case_ ).long(),
}
_UpperCAmelCase : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ )
_UpperCAmelCase : Union[str, Any] = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : int ) ->Dict:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ )
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(snake_case_ ).to(snake_case_ )
_UpperCAmelCase : Any = model(**snake_case_ , output_attentions=snake_case_ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_UpperCAmelCase : Optional[int] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Dict = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase : Tuple = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = True
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : List[str] = model_class(snake_case_ )
model.to(snake_case_ )
model.train()
_UpperCAmelCase : Optional[int] = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ )
_UpperCAmelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : List[str] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
_UpperCAmelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=snake_case_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" )
if is_vision_available()
else None
)
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ )
_UpperCAmelCase : Union[str, Any] = self.default_image_processor
_UpperCAmelCase : int = prepare_img()
_UpperCAmelCase : Optional[Any] = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**snake_case_ )
_UpperCAmelCase : int = torch.tensor(
[[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase : int = torch.tensor(
[[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
_UpperCAmelCase : Dict = torch.tensor(
[[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase : List[str] = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase : int = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_UpperCAmelCase : Any = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase : Union[str, Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase : List[Any] = [
[-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3],
[-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5],
[-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2],
]
_UpperCAmelCase : Optional[int] = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : Tuple = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase : Any = self.default_image_processor
_UpperCAmelCase : List[Any] = prepare_img()
_UpperCAmelCase : List[str] = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ )
_UpperCAmelCase : Optional[int] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(snake_case_ , (1, 3, 8_00, 10_88) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**snake_case_ )
# masks_queries_logits
_UpperCAmelCase : int = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
_UpperCAmelCase : List[str] = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]]
_UpperCAmelCase : Union[str, Any] = torch.tensor(snake_case_ ).to(snake_case_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) )
# class_queries_logits
_UpperCAmelCase : Any = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : Optional[Any] = torch.tensor(
[[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : List[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" )
.to(snake_case_ )
.eval()
)
_UpperCAmelCase : Union[str, Any] = self.default_image_processor
_UpperCAmelCase : Dict = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors="pt" , )
_UpperCAmelCase : Union[str, Any] = inputs["pixel_values"].to(snake_case_ )
_UpperCAmelCase : str = [el.to(snake_case_ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : Any = [el.to(snake_case_ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : str = model(**snake_case_ )
self.assertTrue(outputs.loss is not None )
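# Inference usage sketch (illustrative, not part of the test file): semantic
# post-processing with the public image-processor API, mirroring the integration
# tests above.
if __name__ == "__main__":
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
    image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    image = prepare_img()
    inputs = image_processor(image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) label map per input image
    semantic_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
    print(semantic_map.shape)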
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        # returns True once every generated sequence contains an end-of-function string
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs,
    )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)


# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
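
# For reference, the pass@k numbers reported by `code_eval` come from an unbiased
# estimator. A minimal sketch of that estimator follows (after the Codex paper);
# it is illustrative and is not the metric's actual API.
import numpy as np

def estimate_pass_at_k(n: int, c: int, k: int) -> float:
    """n = samples generated per task, c = samples that passed, k = k in pass@k."""
    if n - c < k:
        return 1.0
    return 1.0 - float(np.prod(1.0 - k / np.arange(n - c + 1, n + 1)))

# e.g. estimate_pass_at_k(n=200, c=37, k=1) -> expected pass rate with one attempt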
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
def median_of_two_arrays(numsa: list[float], numsb: list[float]) -> float:
    all_numbers = sorted(numsa + numsb)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_b = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(F'''The median of two arrays is: {median_of_two_arrays(array_a, array_b)}''')
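
# If the two input arrays are already sorted, the concatenate-and-sort step can be
# replaced by a linear-time merge. A sketch assuming sorted inputs (illustrative):
from heapq import merge

def median_of_two_sorted_arrays(numsa: list[float], numsb: list[float]) -> float:
    merged = list(merge(numsa, numsb))  # O(m + n) instead of O((m + n) log (m + n))
    div, mod = divmod(len(merged), 2)
    return merged[div] if mod == 1 else (merged[div] + merged[div - 1]) / 2

# median_of_two_sorted_arrays([1.0, 3.0], [2.0, 4.0]) -> 2.5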
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
'''simple docstring'''
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be non-negative")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
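
    # A quick illustration of how H(z) grows with redshift in the same flat-LCDM
    # setup (density values are the demo's examples, not measurements):
    for z in (0, 1, 2, 5):
        print(f"z = {z}: H = {hubble_parameter(68.3, 1e-4, matter_density, 1 - matter_density, z):.1f} km/s/Mpc")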
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
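
# Example usage (the repo and filename are hypothetical):
# hf_hub_url("user/my_dataset", "data/train-00000-of-00001.parquet")
# -> "https://huggingface.co/datasets/user/my_dataset/resolve/main/data/train-00000-of-00001.parquet"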
| 40
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase : int = "pixel_values"
lowerCAmelCase : Dict = False
lowerCAmelCase : Union[str, Any] = TimmBackboneConfig
def __init__( self : List[str] , lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(self , "timm" )
super().__init__(lowerCamelCase__ )
_UpperCAmelCase : Any = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
if hasattr(lowerCamelCase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
_UpperCAmelCase : Optional[Any] = getattr(lowerCamelCase__ , "use_pretrained_backbone" , lowerCamelCase__ )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
_UpperCAmelCase : int = config.out_indices if getattr(lowerCamelCase__ , "out_indices" , lowerCamelCase__ ) is not None else (-1,)
_UpperCAmelCase : List[Any] = timm.create_model(
config.backbone , pretrained=lowerCamelCase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowerCamelCase__ , **lowerCamelCase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
_UpperCAmelCase : List[str] = self._backbone.return_layers
        _UpperCAmelCase : Optional[int] = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowerCamelCase__ )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , lowerCamelCase__ : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
_UpperCAmelCase : Any = kwargs.pop("config" , TimmBackboneConfig() )
_UpperCAmelCase : Dict = kwargs.pop("use_timm_backbone" , lowerCamelCase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
_UpperCAmelCase : str = kwargs.pop("num_channels" , config.num_channels )
_UpperCAmelCase : Dict = kwargs.pop("features_only" , config.features_only )
_UpperCAmelCase : str = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
_UpperCAmelCase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
_UpperCAmelCase : Dict = TimmBackboneConfig(
backbone=lowerCamelCase__ , num_channels=lowerCamelCase__ , features_only=lowerCamelCase__ , use_pretrained_backbone=lowerCamelCase__ , out_indices=lowerCamelCase__ , )
return super()._from_config(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Union[str, Any]=None , lowerCamelCase__ : List[Any]=None , lowerCamelCase__ : Union[str, Any]=None , **lowerCamelCase__ : Dict ) ->Union[BackboneOutput, Tuple[Tensor, ...]]:
'''simple docstring'''
_UpperCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict
_UpperCAmelCase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
_UpperCAmelCase : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
_UpperCAmelCase : Optional[int] = self._all_layers
_UpperCAmelCase : List[str] = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self._return_layers
_UpperCAmelCase : Tuple = tuple(hidden_states[i] for i in self.out_indices )
else:
_UpperCAmelCase : Any = self._backbone(lowerCamelCase__ , **lowerCamelCase__ )
_UpperCAmelCase : Tuple = None
_UpperCAmelCase : Dict = tuple(lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = tuple(lowerCamelCase__ ) if hidden_states is not None else None
if not return_dict:
_UpperCAmelCase : Dict = (feature_maps,)
if output_hidden_states:
_UpperCAmelCase : List[str] = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowerCamelCase__ , hidden_states=lowerCamelCase__ , attentions=lowerCamelCase__ )
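
# A minimal usage sketch for this backbone (assumes `timm` is installed; the
# backbone name is an example, not a recommendation):
#
#     from transformers import TimmBackbone, TimmBackboneConfig
#
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False)
#     model = TimmBackbone(config)
#     feature_maps = model(pixel_values).feature_maps  # tuple of stage outputs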
| 40
| 0
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = old_name
if "patch_embed" in old_name:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = old_name.split("." )
if layer == "0":
_UpperCAmelCase : Union[str, Any] = old_name.replace("0" , "convolution1" )
elif layer == "1":
_UpperCAmelCase : Union[str, Any] = old_name.replace("1" , "batchnorm_before" )
elif layer == "3":
_UpperCAmelCase : Optional[int] = old_name.replace("3" , "convolution2" )
else:
_UpperCAmelCase : List[str] = old_name.replace("4" , "batchnorm_after" )
if "network" in old_name and re.search(R"\d\.\d" , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = R"\b\d{2}\b"
if bool(re.search(__lowerCAmelCase , __lowerCAmelCase ) ):
_UpperCAmelCase : List[Any] = re.search(R"\d\.\d\d." , __lowerCAmelCase ).group()
else:
_UpperCAmelCase : Dict = re.search(R"\d\.\d." , __lowerCAmelCase ).group()
if int(match[0] ) < 6:
_UpperCAmelCase : int = old_name.replace(__lowerCAmelCase , "" )
_UpperCAmelCase : Tuple = trimmed_name.replace("network" , match[0] + ".meta4D_layers.blocks." + match[2:-1] )
_UpperCAmelCase : Union[str, Any] = "intermediate_stages." + trimmed_name
else:
_UpperCAmelCase : Tuple = old_name.replace(__lowerCAmelCase , "" )
if int(match[2] ) < num_meta4D_last_stage:
_UpperCAmelCase : Union[str, Any] = trimmed_name.replace("network" , "meta4D_layers.blocks." + match[2] )
else:
_UpperCAmelCase : Union[str, Any] = str(int(match[2] ) - num_meta4D_last_stage )
_UpperCAmelCase : Any = trimmed_name.replace("network" , "meta3D_layers.blocks." + layer_index )
if "norm1" in old_name:
_UpperCAmelCase : List[str] = trimmed_name.replace("norm1" , "layernorm1" )
elif "norm2" in old_name:
_UpperCAmelCase : Optional[int] = trimmed_name.replace("norm2" , "layernorm2" )
elif "fc1" in old_name:
_UpperCAmelCase : Optional[int] = trimmed_name.replace("fc1" , "linear_in" )
elif "fc2" in old_name:
_UpperCAmelCase : Any = trimmed_name.replace("fc2" , "linear_out" )
_UpperCAmelCase : List[str] = "last_stage." + trimmed_name
elif "network" in old_name and re.search(R".\d." , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = old_name.replace("network" , "intermediate_stages" )
if "fc" in new_name:
_UpperCAmelCase : int = new_name.replace("fc" , "convolution" )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
_UpperCAmelCase : Any = new_name.replace("norm1" , "batchnorm_before" )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
_UpperCAmelCase : int = new_name.replace("norm2" , "batchnorm_after" )
if "proj" in new_name:
_UpperCAmelCase : Optional[int] = new_name.replace("proj" , "projection" )
if "dist_head" in new_name:
_UpperCAmelCase : List[Any] = new_name.replace("dist_head" , "distillation_classifier" )
elif "head" in new_name:
_UpperCAmelCase : List[str] = new_name.replace("head" , "classifier" )
elif "patch_embed" in new_name:
_UpperCAmelCase : Tuple = "efficientformer." + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
_UpperCAmelCase : int = new_name.replace("norm" , "layernorm" )
_UpperCAmelCase : Optional[int] = "efficientformer." + new_name
else:
_UpperCAmelCase : List[str] = "efficientformer.encoder." + new_name
return new_name
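
# Illustrative trace of the renaming above for one (example) checkpoint key:
#   "patch_embed.0.weight"
#     -> "patch_embed.convolution1.weight"                  (layer "0" branch)
#     -> "efficientformer.patch_embed.convolution1.weight"  ("patch_embed" prefix branch)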
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
for key in checkpoint.copy().keys():
_UpperCAmelCase : Optional[Any] = checkpoint.pop(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = val
return checkpoint
def __lowerCAmelCase ():
_UpperCAmelCase : Optional[int] = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : List[Any] = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return image
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = torch.load(__lowerCAmelCase , map_location="cpu" )["model"]
_UpperCAmelCase : List[Any] = EfficientFormerConfig.from_json_file(__lowerCAmelCase )
_UpperCAmelCase : List[str] = EfficientFormerForImageClassificationWithTeacher(__lowerCAmelCase )
_UpperCAmelCase : str = "_".join(checkpoint_path.split("/" )[-1].split("." )[0].split("_" )[:-1] )
_UpperCAmelCase : List[str] = config.depths[-1] - config.num_metaad_blocks + 1
_UpperCAmelCase : Optional[Any] = convert_torch_checkpoint(__lowerCAmelCase , __lowerCAmelCase )
model.load_state_dict(__lowerCAmelCase )
model.eval()
_UpperCAmelCase : List[str] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
# prepare image
_UpperCAmelCase : Dict = prepare_img()
_UpperCAmelCase : List[Any] = 256
_UpperCAmelCase : List[str] = 224
_UpperCAmelCase : int = EfficientFormerImageProcessor(
size={"shortest_edge": image_size} , crop_size={"height": crop_size, "width": crop_size} , resample=pillow_resamplings["bicubic"] , )
_UpperCAmelCase : List[Any] = processor(images=__lowerCAmelCase , return_tensors="pt" ).pixel_values
# original processing pipeline
_UpperCAmelCase : Dict = Compose(
[
Resize(__lowerCAmelCase , interpolation=pillow_resamplings["bicubic"] ),
CenterCrop(__lowerCAmelCase ),
ToTensor(),
Normalize(__lowerCAmelCase , __lowerCAmelCase ),
] )
_UpperCAmelCase : List[str] = image_transforms(__lowerCAmelCase ).unsqueeze(0 )
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = model(__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.logits
_UpperCAmelCase : Optional[Any] = (1, 1_000)
if "l1" in model_name:
_UpperCAmelCase : List[str] = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
_UpperCAmelCase : Union[str, Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , __lowerCAmelCase , atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
_UpperCAmelCase : List[str] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
        raise ValueError(
            F"""Unknown model checkpoint: {checkpoint_path}. Supported versions of EfficientFormer are l1, l3 and l7""" )
# Save Checkpoints
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
model.save_pretrained(__lowerCAmelCase )
print(F"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
processor.save_pretrained(__lowerCAmelCase )
print(F"""Processor successfuly saved at {pytorch_dump_path}""" )
if push_to_hub:
print("Pushing model to the hub..." )
model.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add model" , use_temp_dir=__lowerCAmelCase , )
processor.push_to_hub(
repo_id=F"""Bearnardd/{pytorch_dump_path}""" , commit_message="Add image processor" , use_temp_dir=__lowerCAmelCase , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path',
default=None,
type=str,
required=True,
help='Path to EfficientFormer pytorch checkpoint.',
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for EfficientFormer model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
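
# Example invocation (script name and paths are hypothetical):
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path weights/efficientformer_l1_300d.pth \
#       --config_file configs/efficientformer_l1.json \
#       --pytorch_dump_path efficientformer-l1-300 \
#       --no-push_to_hub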
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 40
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""caidas/swin2sr-classicalsr-x2-64""": (
"""https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"""
),
}
class lowerCAmelCase__ ( UpperCamelCase_ ):
lowerCAmelCase : Optional[Any] = "swin2sr"
lowerCAmelCase : Union[str, Any] = {
"hidden_size": "embed_dim",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[str] , lowerCamelCase__ : Tuple=64 , lowerCamelCase__ : Optional[int]=1 , lowerCamelCase__ : List[str]=3 , lowerCamelCase__ : List[Any]=1_80 , lowerCamelCase__ : List[str]=[6, 6, 6, 6, 6, 6] , lowerCamelCase__ : Optional[int]=[6, 6, 6, 6, 6, 6] , lowerCamelCase__ : List[str]=8 , lowerCamelCase__ : str=2.0 , lowerCamelCase__ : Tuple=True , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Tuple=0.0 , lowerCamelCase__ : List[str]=0.1 , lowerCamelCase__ : Tuple="gelu" , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Tuple=1E-5 , lowerCamelCase__ : Optional[Any]=2 , lowerCamelCase__ : Dict=1.0 , lowerCamelCase__ : List[Any]="1conv" , lowerCamelCase__ : Dict="pixelshuffle" , **lowerCamelCase__ : Optional[Any] , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**__A )
_UpperCAmelCase : Dict = image_size
_UpperCAmelCase : Tuple = patch_size
_UpperCAmelCase : Tuple = num_channels
_UpperCAmelCase : List[Any] = embed_dim
_UpperCAmelCase : str = depths
_UpperCAmelCase : str = len(__A )
_UpperCAmelCase : str = num_heads
_UpperCAmelCase : Tuple = window_size
_UpperCAmelCase : Optional[int] = mlp_ratio
_UpperCAmelCase : List[Any] = qkv_bias
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : str = drop_path_rate
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : List[str] = use_absolute_embeddings
_UpperCAmelCase : str = layer_norm_eps
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : str = upscale
_UpperCAmelCase : Tuple = img_range
_UpperCAmelCase : List[str] = resi_connection
_UpperCAmelCase : List[Any] = upsampler
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any]=2 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : Union[str, Any]=False , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Tuple=32 * 8 , lowerCamelCase__ : int=32 * 8 , lowerCamelCase__ : Dict=4 , lowerCamelCase__ : Any=64 , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Dict = is_training
_UpperCAmelCase : Optional[Any] = use_auxiliary_loss
_UpperCAmelCase : Dict = num_queries
_UpperCAmelCase : Dict = num_channels
_UpperCAmelCase : Union[str, Any] = min_size
_UpperCAmelCase : Optional[int] = max_size
_UpperCAmelCase : str = num_labels
_UpperCAmelCase : Optional[int] = hidden_dim
_UpperCAmelCase : Any = hidden_dim
def lowerCAmelCase__ ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCamelCase__ ) > 0.5
).float()
_UpperCAmelCase : int = (torch.rand((self.batch_size, self.num_labels) , device=lowerCamelCase__ ) > 0.5).long()
_UpperCAmelCase : Dict = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCAmelCase__ ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
_UpperCAmelCase : List[str] = self.num_queries
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Union[str, Any] = [1, 1, 1, 1]
_UpperCAmelCase : Any = self.num_channels
_UpperCAmelCase : int = 64
_UpperCAmelCase : int = 1_28
_UpperCAmelCase : int = self.hidden_dim
_UpperCAmelCase : List[Any] = self.hidden_dim
_UpperCAmelCase : Any = self.hidden_dim
return config
def lowerCAmelCase__ ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = self.prepare_config_and_inputs()
_UpperCAmelCase : Optional[Any] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = output.encoder_hidden_states
_UpperCAmelCase : List[str] = output.pixel_decoder_hidden_states
_UpperCAmelCase : Optional[Any] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCamelCase__ ) , config.decoder_layers )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int , lowerCamelCase__ : List[str] , lowerCamelCase__ : Dict=False ) ->str:
'''simple docstring'''
with torch.no_grad():
_UpperCAmelCase : List[Any] = MaskaFormerModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : int = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCamelCase__ , lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : str , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
def comm_check_on_output(lowerCamelCase__ : Dict ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
_UpperCAmelCase : Union[str, Any] = model(pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ )
_UpperCAmelCase : int = model(lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model(
pixel_values=lowerCamelCase__ , pixel_mask=lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
comm_check_on_output(lowerCamelCase__ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : Optional[int] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : str = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Any = False
lowerCAmelCase : List[Any] = False
lowerCAmelCase : Any = False
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : int = MaskaFormerModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*lowerCamelCase__ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : List[str] = model_class(lowerCamelCase__ )
_UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : Tuple = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_UpperCAmelCase : str = MaskaFormerModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = (self.model_tester.min_size,) * 2
_UpperCAmelCase : Optional[Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=lowerCamelCase__ ),
"mask_labels": torch.randn((2, 10, *size) , device=lowerCamelCase__ ),
"class_labels": torch.zeros(2 , 10 , device=lowerCamelCase__ ).long(),
}
_UpperCAmelCase : int = self.model_tester.get_config()
_UpperCAmelCase : str = MaskaFormerForUniversalSegmentation(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : str = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(lowerCamelCase__ , **lowerCamelCase__ , output_hidden_states=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ , output_attentions=lowerCamelCase__ )
self.assertTrue(outputs.attentions is not None )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
if not self.model_tester.is_training:
return
_UpperCAmelCase : Optional[Any] = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Optional[int] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ ).loss
loss.backward()
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase : str = self.all_model_classes[1]
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase : Union[str, Any] = True
_UpperCAmelCase : Tuple = True
_UpperCAmelCase : List[Any] = model_class(lowerCamelCase__ ).to(lowerCamelCase__ )
model.train()
_UpperCAmelCase : Any = model(lowerCamelCase__ , mask_labels=lowerCamelCase__ , class_labels=lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_UpperCAmelCase : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_UpperCAmelCase : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCamelCase__ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase__ = 1e-4
def __lowerCAmelCase ():
_UpperCAmelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : str ) ->str:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ )
_UpperCAmelCase : int = self.default_image_processor
_UpperCAmelCase : Optional[Any] = prepare_img()
_UpperCAmelCase : str = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : Dict = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : str = model(**lowerCamelCase__ )
_UpperCAmelCase : List[str] = torch.tensor(
[[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : List[Any] = torch.tensor(
[[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
_UpperCAmelCase : Tuple = torch.tensor(
[[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(lowerCamelCase__ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : List[Any] = self.default_image_processor
_UpperCAmelCase : Union[str, Any] = prepare_img()
_UpperCAmelCase : Optional[int] = image_processor(lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCamelCase__ , (1, 3, 3_84, 3_84) )
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**lowerCamelCase__ )
# masks_queries_logits
_UpperCAmelCase : List[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
_UpperCAmelCase : List[str] = [
[-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
[-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
[-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
]
_UpperCAmelCase : List[Any] = torch.tensor(lowerCamelCase__ ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
# class_queries_logits
_UpperCAmelCase : Dict = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
_UpperCAmelCase : str = torch.tensor(
[
[1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
[0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
[0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCamelCase__ , atol=lowerCamelCase__ ) )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(lowerCamelCase__ ).eval()
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors="pt" , )
_UpperCAmelCase : str = inputs["pixel_values"].to(lowerCamelCase__ )
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["mask_labels"]]
_UpperCAmelCase : List[str] = [el.to(lowerCamelCase__ ) for el in inputs["class_labels"]]
with torch.no_grad():
_UpperCAmelCase : int = model(**lowerCamelCase__ )
self.assertTrue(outputs.loss is not None )
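
# For context, a minimal inference sketch with the checkpoint these slow tests use
# (illustrative; not part of the test suite):
#
#     from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerImageProcessor
#
#     processor = Mask2FormerImageProcessor.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#     model = Mask2FormerForUniversalSegmentation.from_pretrained("facebook/mask2former-swin-small-coco-instance")
#     inputs = processor(images=image, return_tensors="pt")
#     outputs = model(**inputs)
#     result = processor.post_process_instance_segmentation(outputs)[0]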
| 40
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"edbeeching/decision-transformer-gym-hopper-medium": (
"https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class lowerCAmelCase__ ( lowercase__ ):
lowerCAmelCase : Optional[Any] = "decision_transformer"
lowerCAmelCase : Tuple = ["past_key_values"]
lowerCAmelCase : int = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : str , lowerCamelCase__ : Optional[int]=17 , lowerCamelCase__ : Any=4 , lowerCamelCase__ : int=1_28 , lowerCamelCase__ : Tuple=40_96 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : int=1 , lowerCamelCase__ : Any=10_24 , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : List[str]=1 , lowerCamelCase__ : str=None , lowerCamelCase__ : Dict="relu" , lowerCamelCase__ : int=0.1 , lowerCamelCase__ : Tuple=0.1 , lowerCamelCase__ : Optional[int]=0.1 , lowerCamelCase__ : Tuple=1E-5 , lowerCamelCase__ : str=0.0_2 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Tuple=5_02_56 , lowerCamelCase__ : List[Any]=5_02_56 , lowerCamelCase__ : int=False , lowerCamelCase__ : Optional[Any]=False , **lowerCamelCase__ : List[Any] , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = state_dim
_UpperCAmelCase : Tuple = act_dim
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : Optional[int] = max_ep_len
_UpperCAmelCase : List[str] = action_tanh
_UpperCAmelCase : Any = vocab_size
_UpperCAmelCase : str = n_positions
_UpperCAmelCase : int = n_layer
_UpperCAmelCase : List[str] = n_head
_UpperCAmelCase : Dict = n_inner
_UpperCAmelCase : Tuple = activation_function
_UpperCAmelCase : List[Any] = resid_pdrop
_UpperCAmelCase : Tuple = embd_pdrop
_UpperCAmelCase : Dict = attn_pdrop
_UpperCAmelCase : Optional[int] = layer_norm_epsilon
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : List[str] = scale_attn_weights
_UpperCAmelCase : List[str] = use_cache
_UpperCAmelCase : Union[str, Any] = scale_attn_by_inverse_layer_idx
_UpperCAmelCase : List[Any] = reorder_and_upcast_attn
_UpperCAmelCase : int = bos_token_id
_UpperCAmelCase : Any = eos_token_id
super().__init__(bos_token_id=lowerCamelCase__ , eos_token_id=lowerCamelCase__ , **lowerCamelCase__ )
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def bamb(x):
    # bytes -> megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase = 16 , __lowerCAmelCase = "bert-base-cased" , __lowerCAmelCase = 320 , __lowerCAmelCase = 160 , ):
_UpperCAmelCase : int = AutoTokenizer.from_pretrained(__lowerCAmelCase )
_UpperCAmelCase : Any = load_dataset(
"glue" , "mrpc" , split={"train": F"""train[:{n_train}]""", "validation": F"""validation[:{n_val}]"""} )
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
_UpperCAmelCase : List[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_UpperCAmelCase : int = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=__lowerCAmelCase )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCAmelCase : List[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
_UpperCAmelCase : Any = DataLoader(
tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
_UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=__lowerCAmelCase )
return train_dataloader, eval_dataloader
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
# Initialize accelerator
_UpperCAmelCase : Union[str, Any] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCAmelCase : List[Any] = config["lr"]
_UpperCAmelCase : List[Any] = int(config["num_epochs"] )
_UpperCAmelCase : int = int(config["seed"] )
_UpperCAmelCase : Union[str, Any] = int(config["batch_size"] )
_UpperCAmelCase : Tuple = args.model_name_or_path
set_seed(__lowerCAmelCase )
_UpperCAmelCase , _UpperCAmelCase : List[str] = get_dataloaders(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained(__lowerCAmelCase , return_dict=__lowerCAmelCase )
# Instantiate optimizer
_UpperCAmelCase : Dict = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_UpperCAmelCase : str = optimizer_cls(params=model.parameters() , lr=__lowerCAmelCase )
if accelerator.state.deepspeed_plugin is not None:
_UpperCAmelCase : Any = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
_UpperCAmelCase : Any = 1
_UpperCAmelCase : Optional[int] = (len(__lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_UpperCAmelCase : Tuple = get_linear_schedule_with_warmup(
optimizer=__lowerCAmelCase , num_warmup_steps=0 , num_training_steps=__lowerCAmelCase , )
else:
_UpperCAmelCase : Optional[Any] = DummyScheduler(__lowerCAmelCase , total_num_steps=__lowerCAmelCase , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# We need to keep track of how many total steps we have iterated over
_UpperCAmelCase : Union[str, Any] = 0
    # We also need to keep track of the starting epoch so files are named properly
_UpperCAmelCase : str = 0
# Now we train the model
_UpperCAmelCase : Optional[Any] = {}
for epoch in range(__lowerCAmelCase , __lowerCAmelCase ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(__lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = model(**__lowerCAmelCase )
_UpperCAmelCase : Union[str, Any] = outputs.loss
_UpperCAmelCase : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(__lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
_UpperCAmelCase : Optional[int] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def __lowerCAmelCase ():
_UpperCAmelCase : Any = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=__lowerCAmelCase , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=__lowerCAmelCase , )
parser.add_argument(
"--output_dir" , type=__lowerCAmelCase , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=__lowerCAmelCase , default=__lowerCAmelCase , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=__lowerCAmelCase , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=__lowerCAmelCase , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=__lowerCAmelCase , default=1 , help="Number of train epochs." , )
_UpperCAmelCase : Tuple = parser.parse_args()
_UpperCAmelCase : Optional[Any] = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(__lowerCAmelCase , __lowerCAmelCase )
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 2_000_000) -> int:
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a) < abs(
            target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F'''{solution() = }''')
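
    # Why triangle numbers: an m-by-n grid contains T(m) * T(n) rectangles, where
    # T(k) = k * (k + 1) / 2 counts the ways to pick two of the k + 1 grid lines.
    # A quick sanity check against the example from the problem statement:
    def count_rectangles(m: int, n: int) -> int:
        return (m * (m + 1) // 2) * (n * (n + 1) // 2)

    print(F'''{count_rectangles(3, 2) = }''')  # 18, as stated in Project Euler 85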
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error; if this is the case don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
        help='Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
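# A minimal, self-contained sketch of the key-renaming pattern used by the
# conversion above. The two regex rules mirror the ones in convert_state_dict;
# the helper name and the reduced rule set are illustrative assumptions, not
# the full RWKV mapping.
import re
def rename_keys_sketch(state_dict: dict) -> dict:
    renamed = {}
    for name, weight in state_dict.items():
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        if name != "head.weight":
            name = "rwkv." + name  # everything but the LM head lives under the `rwkv` prefix
        renamed[name] = weight
    return renamed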
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
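# For comparison, a recursive sketch of the same traversal. The explicit stack
# above becomes the call stack here; note that Python's default recursion
# limit (~1000 frames) makes the iterative version safer on deep graphs.
def depth_first_search_recursive(graph: dict, start: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(start)
    for adj in graph[start]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored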
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 706
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]):
    return np.maximum(0, vector)
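# A small companion sketch: the (sub)gradient of ReLU as used in
# backpropagation. Returning 0 at exactly x == 0 is a convention assumed
# here; any value in [0, 1] is a valid subgradient at that point.
def relu_derivative(vector):
    return np.where(np.asarray(vector) > 0, 1.0, 0.0)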
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 40
| 0
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class VisionTextDualEncoderProcessorTest( unittest.TestCase ):
def lowerCAmelCase__ ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = tempfile.mkdtemp()
# fmt: off
_UpperCAmelCase : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
# fmt: on
_UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
_UpperCAmelCase : Dict = {
"do_resize": True,
"size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.5, 0.5, 0.5],
"image_std": [0.5, 0.5, 0.5],
}
_UpperCAmelCase : Optional[Any] = os.path.join(self.tmpdirname , __lowerCAmelCase )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict , **lowerCamelCase__ : Dict ) ->int:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCAmelCase__ ( self : Dict , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **__lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self : List[str] ) ->int:
'''simple docstring'''
        _UpperCAmelCase : str = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
_UpperCAmelCase : Optional[Any] = [Image.fromarray(np.moveaxis(__lowerCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
_UpperCAmelCase : Optional[Any] = self.get_image_processor()
_UpperCAmelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
_UpperCAmelCase : List[Any] = self.get_image_processor(do_normalize=__lowerCAmelCase , padding_value=1.0 )
_UpperCAmelCase : int = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__lowerCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_image_processor()
_UpperCAmelCase : Tuple = self.get_tokenizer()
_UpperCAmelCase : Tuple = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_UpperCAmelCase : Tuple = self.prepare_image_inputs()
_UpperCAmelCase : List[Any] = image_processor(__lowerCAmelCase , return_tensors="np" )
_UpperCAmelCase : Optional[int] = processor(images=__lowerCAmelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Any = self.get_image_processor()
_UpperCAmelCase : Optional[Any] = self.get_tokenizer()
_UpperCAmelCase : str = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : Tuple = processor(text=__lowerCAmelCase )
_UpperCAmelCase : List[str] = tokenizer(__lowerCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.get_image_processor()
_UpperCAmelCase : Union[str, Any] = self.get_tokenizer()
_UpperCAmelCase : int = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_UpperCAmelCase : Optional[int] = "lower newer"
_UpperCAmelCase : Union[str, Any] = self.prepare_image_inputs()
_UpperCAmelCase : List[str] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(__lowerCAmelCase ):
processor()
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : int = self.get_tokenizer()
_UpperCAmelCase : str = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_UpperCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase : Dict = processor.batch_decode(__lowerCAmelCase )
_UpperCAmelCase : List[str] = tokenizer.batch_decode(__lowerCAmelCase )
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.get_image_processor()
_UpperCAmelCase : List[str] = self.get_tokenizer()
_UpperCAmelCase : List[str] = VisionTextDualEncoderProcessor(tokenizer=__lowerCAmelCase , image_processor=__lowerCAmelCase )
_UpperCAmelCase : str = "lower newer"
_UpperCAmelCase : Optional[Any] = self.prepare_image_inputs()
_UpperCAmelCase : Union[str, Any] = processor(text=__lowerCAmelCase , images=__lowerCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
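# A minimal call-pattern sketch (not part of the test suite): once saved and
# reloaded, the processor routes text through the tokenizer and images through
# the image processor in a single call. The checkpoint name below is an
# illustrative assumption.
#
# processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian")
# batch = processor(text=["a photo of a cat"], images=image, padding=True, return_tensors="pt")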
| 707
|
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class EMAModel :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
        if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
            if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
| 40
| 0
|
'''simple docstring'''
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
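# An equivalent sketch of the escape-time iteration using Python's built-in
# complex numbers instead of the explicit (a, b) real/imaginary pair in
# get_distance above: z -> z*z + c diverges once |z| > 2 (i.e. |z|**2 > 4).
def get_distance_complex(x: float, y: float, max_step: int) -> float:
    c = complex(x, y)
    z = c
    for step in range(max_step):  # noqa: B007
        z = z * z + c
        if abs(z) > 2:
            break
    return step / (max_step - 1)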
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 708
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
        'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
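# A generic sketch of the layer-selection idea above: copy a chosen subset of
# teacher encoder layers into consecutive student slots by rewriting key
# prefixes. The helper name is illustrative; the script above maps each tensor
# explicitly instead.
def select_teacher_layers(state_dict, prefix, teacher_layers=(0, 2, 4, 7, 9, 11)):
    compressed = {}
    for std_idx, teacher_idx in enumerate(teacher_layers):
        src = f"{prefix}.encoder.layer.{teacher_idx}."
        dst = f"{prefix}.encoder.layer.{std_idx}."
        for key, value in state_dict.items():
            if key.startswith(src):
                compressed[dst + key[len(src):]] = value
    return compressed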
| 40
| 0
|
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
lowerCamelCase__ = logging.getLogger(__name__)
class SummarizationModule( BaseTransformer ):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"
def __init__( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
_UpperCAmelCase : Union[str, Any] = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(lowerCAmelCase__ , num_labels=lowerCAmelCase__ , mode=self.mode , **lowerCAmelCase__ )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
_UpperCAmelCase : Optional[Any] = Path(self.output_dir ) / "metrics.json"
_UpperCAmelCase : int = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Union[str, Any] = defaultdict(lowerCAmelCase__ )
_UpperCAmelCase : Any = self.config.model_type
_UpperCAmelCase : str = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
_UpperCAmelCase : dict = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
_UpperCAmelCase : Tuple = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
_UpperCAmelCase : Optional[Any] = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
_UpperCAmelCase : str = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], F"""target_lens: {self.target_lens}"""
assert self.target_lens["train"] <= self.target_lens["test"], F"""target_lens: {self.target_lens}"""
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
_UpperCAmelCase : Optional[Any] = get_git_info()["repo_sha"]
_UpperCAmelCase : List[Any] = hparams.num_workers
_UpperCAmelCase : Union[str, Any] = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
_UpperCAmelCase : Optional[Any] = self.decoder_start_token_id
_UpperCAmelCase : Union[str, Any] = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : int = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
_UpperCAmelCase : Any = self.hparams.eval_max_gen_length
else:
_UpperCAmelCase : Optional[int] = self.model.config.max_length
_UpperCAmelCase : Optional[Any] = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict[str, List[str]]:
'''simple docstring'''
_UpperCAmelCase : str = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCAmelCase__ , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
_UpperCAmelCase : Any = True
return readable_batch
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Tuple , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
return self.model(lowerCAmelCase__ , **lowerCAmelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = self.tokenizer.batch_decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__ )
return lmap(str.strip , lowerCAmelCase__ )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.tokenizer.pad_token_id
        _UpperCAmelCase , _UpperCAmelCase : Optional[int] = batch["input_ids"], batch["attention_mask"]
_UpperCAmelCase : Tuple = batch["labels"]
if isinstance(self.model , lowerCAmelCase__ ):
_UpperCAmelCase : Optional[int] = self.model._shift_right(lowerCAmelCase__ )
else:
_UpperCAmelCase : Optional[int] = shift_tokens_right(lowerCAmelCase__ , lowerCAmelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
_UpperCAmelCase : List[str] = decoder_input_ids
self.save_readable_batch(lowerCAmelCase__ )
_UpperCAmelCase : Any = self(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
_UpperCAmelCase : Any = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
_UpperCAmelCase : List[str] = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
_UpperCAmelCase : Optional[int] = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
_UpperCAmelCase : Tuple = nn.functional.log_softmax(lowerCAmelCase__ , dim=-1 )
_UpperCAmelCase : Any = label_smoothed_nll_loss(
lowerCAmelCase__ , lowerCAmelCase__ , self.hparams.label_smoothing , ignore_index=lowerCAmelCase__ )
return (loss,)
@property
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self._step(lowerCAmelCase__ )
_UpperCAmelCase : Dict = dict(zip(self.loss_names , lowerCAmelCase__ ) )
# tokens per batch
_UpperCAmelCase : List[Any] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
_UpperCAmelCase : List[Any] = batch["input_ids"].shape[0]
_UpperCAmelCase : Any = batch["input_ids"].eq(self.pad ).sum()
_UpperCAmelCase : str = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
return self._generative_step(lowerCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : List[str]="val" ) ->Dict:
'''simple docstring'''
self.step_count += 1
_UpperCAmelCase : List[str] = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
_UpperCAmelCase : int = losses["loss"]
_UpperCAmelCase : Any = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
_UpperCAmelCase : Optional[Any] = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
_UpperCAmelCase : torch.FloatTensor = torch.tensor(lowerCAmelCase__ ).type_as(lowerCAmelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = {F"""{prefix}_avg_{k}""": x for k, x in losses.items()}
_UpperCAmelCase : List[Any] = self.step_count
self.metrics[prefix].append(lowerCAmelCase__ ) # callback writes this to self.metrics_save_path
_UpperCAmelCase : Optional[int] = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
F"""{prefix}_loss""": loss,
F"""{prefix}_{self.val_metric}""": metric_tensor,
}
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
return calculate_rouge(lowerCAmelCase__ , lowerCAmelCase__ )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Union[str, Any] ) ->dict:
'''simple docstring'''
_UpperCAmelCase : Any = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
_UpperCAmelCase : Optional[int] = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=lowerCAmelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
_UpperCAmelCase : str = (time.time() - ta) / batch["input_ids"].shape[0]
_UpperCAmelCase : List[str] = self.ids_to_clean_text(lowerCAmelCase__ )
_UpperCAmelCase : List[str] = self.ids_to_clean_text(batch["labels"] )
_UpperCAmelCase : int = self._step(lowerCAmelCase__ )
_UpperCAmelCase : int = dict(zip(self.loss_names , lowerCAmelCase__ ) )
_UpperCAmelCase : Dict = self.calc_generative_metrics(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Optional[int] = np.mean(lmap(lowerCAmelCase__ , lowerCAmelCase__ ) )
base_metrics.update(gen_time=lowerCAmelCase__ , gen_len=lowerCAmelCase__ , preds=lowerCAmelCase__ , target=lowerCAmelCase__ , **lowerCAmelCase__ )
return base_metrics
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
return self._generative_step(lowerCAmelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : List[Any] ) ->Optional[Any]:
'''simple docstring'''
return self.validation_epoch_end(lowerCAmelCase__ , prefix="test" )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Dict ) ->SeqaSeqDataset:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = self.n_obs[type_path]
_UpperCAmelCase : List[str] = self.target_lens[type_path]
_UpperCAmelCase : Any = self.dataset_class(
self.tokenizer , type_path=lowerCAmelCase__ , n_obs=lowerCAmelCase__ , max_target_length=lowerCAmelCase__ , **self.dataset_kwargs , )
return dataset
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : List[Any] , lowerCamelCase__ : int , lowerCamelCase__ : int = False ) ->DataLoader:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.get_dataset(lowerCAmelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
_UpperCAmelCase : int = dataset.make_sortish_sampler(lowerCAmelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
_UpperCAmelCase : Union[str, Any] = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase__ , batch_sampler=lowerCAmelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCAmelCase__ , batch_size=lowerCAmelCase__ , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase__ , num_workers=self.num_workers , sampler=lowerCAmelCase__ , )
def lowerCAmelCase__ ( self : List[str] ) ->DataLoader:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase__ )
return dataloader
def lowerCAmelCase__ ( self : List[str] ) ->DataLoader:
'''simple docstring'''
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def lowerCAmelCase__ ( self : Optional[int] ) ->DataLoader:
'''simple docstring'''
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Any ) ->Optional[int]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(lowerCAmelCase__ , lowerCAmelCase__ )
add_generic_args(lowerCAmelCase__ , lowerCAmelCase__ )
parser.add_argument(
"--max_source_length" , default=10_24 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=1_42 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=1_42 , type=lowerCAmelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=lowerCAmelCase__ )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=lowerCAmelCase__ )
parser.add_argument("--max_tokens_per_batch" , type=lowerCAmelCase__ , default=lowerCAmelCase__ )
parser.add_argument("--logger_name" , type=lowerCAmelCase__ , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=lowerCAmelCase__ , default=5_00 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help="# examples. -1 means use all." )
        parser.add_argument(
            "--task" , type=lowerCAmelCase__ , default="summarization" , required=lowerCAmelCase__ , help="Task to fine-tune: summarization or translation." )
parser.add_argument("--label_smoothing" , type=lowerCAmelCase__ , default=0.0 , required=lowerCAmelCase__ )
parser.add_argument("--src_lang" , type=lowerCAmelCase__ , default="" , required=lowerCAmelCase__ )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase__ , default="" , required=lowerCAmelCase__ )
parser.add_argument("--eval_beams" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ )
parser.add_argument(
"--val_metric" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , required=lowerCAmelCase__ , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=lowerCAmelCase__ , default=1 , required=lowerCAmelCase__ , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=lowerCAmelCase__ , default=-1 , required=lowerCAmelCase__ , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class TranslationModule( SummarizationModule ):
    mode = "translation"
    loss_names = ["loss"]
    metric_names = ["bleu"]
    default_val_metric = "bleu"
def __init__( self : Optional[int] , lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ) ->Optional[Any]:
'''simple docstring'''
super().__init__(lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCAmelCase : Optional[Any] = hparams.src_lang
_UpperCAmelCase : Union[str, Any] = hparams.tgt_lang
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->dict:
'''simple docstring'''
return calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__ )
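# A compact sketch of the `label_smoothed_nll_loss` helper used in `_step`
# above, following the common fairseq-style formulation (the exact utils.py
# implementation may differ slightly): mix the NLL of the gold token with the
# mean negative log-probability over the whole vocabulary.
def label_smoothed_nll_loss_sketch(lprobs, target, epsilon, ignore_index=-100):
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss.sum() + eps_i * smooth_loss.sum()
    return loss, nll_loss.sum()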
def main(args, model=None):
Path(args.output_dir ).mkdir(exist_ok=_lowercase )
check_output_dir(_lowercase , expected_items=3 )
if model is None:
if "summarization" in args.task:
_UpperCAmelCase : SummarizationModule = SummarizationModule(_lowercase )
else:
_UpperCAmelCase : SummarizationModule = TranslationModule(_lowercase )
_UpperCAmelCase : int = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
_UpperCAmelCase : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase : Optional[int] = os.environ.get("WANDB_PROJECT" , _lowercase )
_UpperCAmelCase : Dict = WandbLogger(name=model.output_dir.name , project=_lowercase )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
_UpperCAmelCase : str = WandbLogger(name=model.output_dir.name , project=F"""hf_{dataset}""" )
if args.early_stopping_patience >= 0:
_UpperCAmelCase : Tuple = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Optional[Any] = args.val_metric == "loss"
_UpperCAmelCase : pl.Trainer = generic_train(
_lowercase , _lowercase , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _lowercase ) , early_stopping_callback=_lowercase , logger=_lowercase , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
_UpperCAmelCase : Optional[Any] = ""
_UpperCAmelCase : Dict = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=_lowercase ) )
if checkpoints:
_UpperCAmelCase : Optional[int] = checkpoints[-1]
_UpperCAmelCase : int = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
lowerCamelCase__ = pl.Trainer.add_argparse_args(parser)
lowerCamelCase__ = SummarizationModule.add_model_specific_args(parser, os.getcwd())
lowerCamelCase__ = parser.parse_args()
main(args)
| 709
|
'''simple docstring'''
from __future__ import annotations
graph = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search(self) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)
    def shortest_path(self, target_vertex: str) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = (
                F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + F"""->{target_vertex}"""
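# An iterative alternative sketch to the recursive shortest_path above: walk
# the parent map from the target back to the source, then reverse. It assumes
# breath_first_search() has already been run on `g`.
def shortest_path_iterative(g: Graph, target_vertex: str) -> str:
    path = [target_vertex]
    while path[-1] != g.source_vertex:
        parent = g.parent.get(path[-1])
        if parent is None:
            raise ValueError(F"""No path from vertex: {g.source_vertex} to vertex: {target_vertex}""")
        path.append(parent)
    return "->".join(reversed(path))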
if __name__ == "__main__":
    g = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
| 40
| 0
|
'''simple docstring'''
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(F"""Building PyTorch model from configuration: {config}""")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--mobilebert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained MobileBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 710
|
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
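# A minimal call-pattern sketch for the processor above (a BLIP-2-style
# image+text processor; the class name restored above and the checkpoint name
# below are assumptions for illustration): text goes to the tokenizer, images
# to the image processor, and the two encodings are merged into one
# BatchEncoding.
#
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=image, text="Question: how many cats are there? Answer:", return_tensors="pt")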
| 40
| 0
|
'''simple docstring'''
lowerCamelCase__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    '''simple docstring'''
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
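    # A second worked sketch under the same RULE 1-5 scheme (single-digit
    # operands only, since the parser above reads one character at a time):
    # ((9 - 3) / (2 + 1)) evaluates to 2.0.
    print(F'''((9 - 3) / (2 + 1)) = {dijkstras_two_stack_algorithm("((9 - 3) / (2 + 1))")}''')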
| 711
|
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
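# A quick sanity-check sketch (hypothetical second input): in the path graph
# 0-1-2, removing vertex 1 disconnects 0 from 2, so 1 is its only
# articulation point.
compute_ap({0: [1], 1: [0, 2], 2: [1]})  # prints: 1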
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 712
|
'''simple docstring'''
def solution():
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
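# The same last-ten-digits answer computed with three-argument pow, which
# keeps every intermediate value below 10**10 instead of materialising the
# full multi-thousand-digit powers:
def solution_modular():
    modulus = 10**10
    return str(sum(pow(i, i, modulus) for i in range(1, 1_001)) % modulus).zfill(10)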
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = "owlvit_text_model"
def __init__( self : Tuple , lowerCamelCase__ : List[Any]=4_94_08 , lowerCamelCase__ : Optional[Any]=5_12 , lowerCamelCase__ : Optional[int]=20_48 , lowerCamelCase__ : Any=12 , lowerCamelCase__ : List[str]=8 , lowerCamelCase__ : Dict=16 , lowerCamelCase__ : Tuple="quick_gelu" , lowerCamelCase__ : Dict=1E-5 , lowerCamelCase__ : Union[str, Any]=0.0 , lowerCamelCase__ : Optional[Any]=0.0_2 , lowerCamelCase__ : Tuple=1.0 , lowerCamelCase__ : List[Any]=0 , lowerCamelCase__ : Optional[int]=4_94_06 , lowerCamelCase__ : Tuple=4_94_07 , **lowerCamelCase__ : Optional[int] , ) ->int:
'''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
_UpperCAmelCase : List[str] = vocab_size
_UpperCAmelCase : List[str] = hidden_size
_UpperCAmelCase : Optional[int] = intermediate_size
_UpperCAmelCase : str = num_hidden_layers
_UpperCAmelCase : List[Any] = num_attention_heads
_UpperCAmelCase : Dict = max_position_embeddings
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Any = attention_dropout
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[Any] = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[int] ) ->"PretrainedConfig":
'''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : int = "owlvit_vision_model"
def __init__( self : List[str] , lowerCamelCase__ : Tuple=7_68 , lowerCamelCase__ : str=30_72 , lowerCamelCase__ : List[str]=12 , lowerCamelCase__ : List[Any]=12 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : Optional[Any]=7_68 , lowerCamelCase__ : str=32 , lowerCamelCase__ : Optional[int]="quick_gelu" , lowerCamelCase__ : List[Any]=1E-5 , lowerCamelCase__ : int=0.0 , lowerCamelCase__ : int=0.0_2 , lowerCamelCase__ : Any=1.0 , **lowerCamelCase__ : Optional[int] , ) ->List[Any]:
'''simple docstring'''
        super().__init__(**kwargs )
_UpperCAmelCase : Optional[int] = hidden_size
_UpperCAmelCase : Union[str, Any] = intermediate_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : Any = num_attention_heads
_UpperCAmelCase : int = num_channels
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : Optional[Any] = patch_size
_UpperCAmelCase : List[str] = hidden_act
_UpperCAmelCase : Union[str, Any] = layer_norm_eps
_UpperCAmelCase : Union[str, Any] = attention_dropout
_UpperCAmelCase : str = initializer_range
_UpperCAmelCase : Any = initializer_factor
@classmethod
def lowerCAmelCase__ ( cls : Tuple , lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->"PretrainedConfig":
'''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type" ) == "owlvit":
            config_dict = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Dict = "owlvit"
lowerCAmelCase : Union[str, Any] = True
def __init__( self : int , lowerCamelCase__ : int=None , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[Any]=5_12 , lowerCamelCase__ : List[Any]=2.6_5_9_2 , lowerCamelCase__ : Any=True , **lowerCamelCase__ : Any , ) ->List[str]:
'''simple docstring'''
        super().__init__(**kwargs )
if text_config is None:
_UpperCAmelCase : List[str] = {}
logger.info("text_config is None. Initializing the OwlViTTextConfig with default values." )
if vision_config is None:
_UpperCAmelCase : Dict = {}
logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values." )
        self.text_config = OwlViTTextConfig(**text_config )
        self.vision_config = OwlViTVisionConfig(**vision_config )
_UpperCAmelCase : int = projection_dim
_UpperCAmelCase : int = logit_scale_init_value
_UpperCAmelCase : str = return_dict
_UpperCAmelCase : Dict = 1.0
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) ->"PretrainedConfig":
'''simple docstring'''
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
        return cls.from_dict(config_dict , **kwargs )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , lowerCamelCase__ : Dict , lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict , **kwargs )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
        output = copy.deepcopy(self.__dict__ )
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class lowerCAmelCase__ ( UpperCAmelCase__ ):
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("attention_mask", {0: "batch", 1: "sequence"}),
] )
@property
def lowerCAmelCase__ ( self : List[str] ) ->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("logits_per_image", {0: "batch"}),
("logits_per_text", {0: "batch"}),
("text_embeds", {0: "batch"}),
("image_embeds", {0: "batch"}),
] )
@property
def lowerCAmelCase__ ( self : Tuple ) ->float:
'''simple docstring'''
return 1E-4
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int = -1 , lowerCamelCase__ : str = -1 , lowerCamelCase__ : int = None , ) ->Mapping[str, Any]:
'''simple docstring'''
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer , batch_size=batch_size , seq_length=seq_length , framework=framework )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor , batch_size=batch_size , framework=framework )
        return {**text_input_dict, **image_input_dict}
@property
def lowerCAmelCase__ ( self : Any ) ->int:
'''simple docstring'''
return 14
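# A rough export sketch (the command-line form is an assumption; flags and model
# support vary across transformers versions):
#
#     python -m transformers.onnx --model=google/owlvit-base-patch32 onnx_out/
#
# The ONNX config above then pins the dynamic axes, an absolute tolerance of 1e-4
# for output validation, and a default opset of 14.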
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a , input_b ):
    return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(input_a , input_b ) ) )
def similarity_search(dataset , value_array ):
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            F"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
        )
        raise ValueError(msg )
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                F"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
            )
            raise ValueError(msg )
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape" )
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            F"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
        )
        raise TypeError(msg )
    answer = []
    for value in value_array:
        dist = euclidean(value , dataset[0] )
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value , dataset_value )
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist] )
    return answer
def cosine_similarity(input_a , input_b ):
    return np.dot(input_a , input_b ) / (norm(input_a ) * norm(input_b ))
if __name__ == "__main__":
import doctest
doctest.testmod()
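# Example usage (illustrative values):
#
#     dataset = np.array([[0, 0], [1, 1], [2, 2]])
#     value_array = np.array([[0, 1]])
#     similarity_search(dataset, value_array)   # -> [[[0, 0], 1.0]]
#
# Each result pairs the nearest dataset vector with its Euclidean distance; the
# linear scan keeps the first vector seen when distances are tied.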
'''simple docstring'''
import contextlib
import importlib
import io
import unittest
import transformers
# Try to import everything from transformers to ensure every object can be loaded.
from transformers import * # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available
if is_torch_available():
from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification
if is_tf_available():
from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification
if is_flax_available():
from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification
lowerCamelCase__ = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co
lowerCamelCase__ = 'main'
# Default branch name
lowerCamelCase__ = 'f2c752cfc5c0ab6f4bdec59acea69eefbee381c2'
# One particular commit (not the top of `main`)
lowerCamelCase__ = 'aaaaaaa'
# This commit does not exist, so we should 404.
lowerCamelCase__ = 'd9e9f15bc825e4b2c9249e9578f884bbcb5e3684'
# Sha-1 of config.json on the top of `main`, for checking purposes
lowerCamelCase__ = '4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3'
@contextlib.contextmanager
def context_en():
print("Welcome!" )
yield
print("Bye!" )
@contextlib.contextmanager
def context_fr():
print("Bonjour!" )
yield
print("Au revoir!" )
class lowerCAmelCase__ ( unittest.TestCase ):
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
assert transformers.__spec__ is not None
assert importlib.util.find_spec("transformers" ) is not None
class lowerCAmelCase__ ( unittest.TestCase ):
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Optional[int] ) ->Any:
'''simple docstring'''
with ContextManagers([] ):
print("Transformers are awesome!" )
# The print statement adds a new line at the end of the output
self.assertEqual(mock_stdout.getvalue() , "Transformers are awesome!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
with ContextManagers([context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Welcome!\nTransformers are awesome!\nBye!\n" )
@unittest.mock.patch("sys.stdout" , new_callable=io.StringIO )
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
with ContextManagers([context_fr(), context_en()] ):
print("Transformers are awesome!" )
# The output should be wrapped with an English and French welcome and goodbye
self.assertEqual(mock_stdout.getvalue() , "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n" )
    @require_torch
    def lowerCAmelCase__ ( self : int ) ->Tuple:
        '''simple docstring'''
        self.assertEqual(find_labels(BertForSequenceClassification ) , ["labels"] )
        self.assertEqual(find_labels(BertForPreTraining ) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(BertForQuestionAnswering ) , ["start_positions", "end_positions"] )
        class DummyModel(BertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ["labels"] )
    @require_tf
    def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
        '''simple docstring'''
        self.assertEqual(find_labels(TFBertForSequenceClassification ) , ["labels"] )
        self.assertEqual(find_labels(TFBertForPreTraining ) , ["labels", "next_sentence_label"] )
        self.assertEqual(find_labels(TFBertForQuestionAnswering ) , ["start_positions", "end_positions"] )
        class DummyModel(TFBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , ["labels"] )
    @require_flax
    def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
        '''simple docstring'''
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification ) , [] )
        self.assertEqual(find_labels(FlaxBertForPreTraining ) , [] )
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering ) , [] )
        class DummyModel(FlaxBertForSequenceClassification ):
            pass
        self.assertEqual(find_labels(DummyModel ) , [] )
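# find_labels works by inspecting the signature of the model's forward/call
# method for label-like arguments, which is why the empty subclasses above
# inherit the label names of their parent classes.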
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
lowerCamelCase__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class lowerCAmelCase__ ( unittest.TestCase ):
    def setUp( self ) ->Any:
        '''simple docstring'''
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
    def tearDown( self ) ->str:
        '''simple docstring'''
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir )
    def check_copy_consistency( self , comment , class_name , class_code , overwrite_result=None ):
        '''simple docstring'''
        code = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
        if overwrite_result is not None:
            expected = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35} , line_length=1_19 )
        code = black.format_str(code , mode=mode )
        fname = os.path.join(self.diffusers_dir , "new_code.py" )
        with open(fname , "w" , newline="\n" ) as f:
            f.write(code )
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname ) ) == 0 )
        else:
            check_copies.is_copy_consistent(f.name , overwrite=True )
            with open(fname , "r" ) as f:
                self.assertTrue(f.read() , expected )
    def test_find_code_in_diffusers( self ) ->Tuple:
        '''simple docstring'''
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
        self.assertEqual(code , REFERENCE_CODE )
    def test_copy_consistency( self ) ->Optional[int]:
        '''simple docstring'''
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE , )
        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , F"""{long_class_name}SchedulerOutput""" , re.sub("DDPM" , long_class_name , REFERENCE_CODE ) , )
        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , REFERENCE_CODE , overwrite_result=re.sub("DDPM" , "Test" , REFERENCE_CODE ) , )
'''simple docstring'''
def hexagonal_numbers(length ):
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
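# The n-th hexagonal number is n * (2n - 1), so the two calls above print
# [0, 1, 6, 15, 28] and the same list extended up to 9 * 17 = 153.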
'''simple docstring'''
from math import factorial
class lowerCAmelCase__ :
    def __init__( self , real , rank ) ->Dict:
        '''simple docstring'''
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
def __repr__( self : str ) ->List[str]:
'''simple docstring'''
        return (
            F"""{self.real}+"""
            F"""{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"""
        )
def lowerCAmelCase__ ( self : Dict ) ->Tuple:
'''simple docstring'''
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__( self , other ) ->Any:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__( self , other ) ->Dict:
        '''simple docstring'''
        return self + other * -1
    def __mul__( self , other ) ->Union[str, Any]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__( self , other ) ->Union[str, Any]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__( self , other ) ->List[str]:
        '''simple docstring'''
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__( self , n ) ->Optional[int]:
        '''simple docstring'''
        if n < 0 or isinstance(n , float ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
        x = self
for _ in range(n - 1 ):
x *= self
return x
def differentiate(func , position , order ):
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
import doctest
doctest.testmod()
    def f(y ):
        return y**2 * y**4
print(differentiate(f, 9, 2))
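    # f(y) = y**2 * y**4 = y**6, so f''(y) = 30 * y**4 and the call above prints
    # f''(9) = 30 * 9**4 = 196830.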
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Any = ['torch']
def __init__( self : List[str] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[str] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : str , **lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[Any] = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : Dict ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Tuple , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[Any] = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : Optional[int] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[int] = ['torch']
def __init__( self : Union[str, Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Union[str, Any] ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Any ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : str , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
def __lowerCAmelCase (*args , **kwargs ):
    requires_backends(__lowerCAmelCase , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : Union[str, Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Tuple , *lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : int ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Any = ['torch']
def __init__( self : List[str] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : List[str] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Union[str, Any] = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : int , **lowerCamelCase__ : Tuple ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[int] = ['torch']
def __init__( self : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : int ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Tuple ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[int] = ['torch']
def __init__( self : str , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[str] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : int , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Any ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : Dict , *lowerCamelCase__ : Any , **lowerCamelCase__ : Optional[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Dict ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : int , *lowerCamelCase__ : str , **lowerCamelCase__ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : Any , **lowerCamelCase__ : Any ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : Any ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Any , **lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Union[str, Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[Any] = ['torch']
def __init__( self : Any , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : str , **lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Union[str, Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Any ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : List[str] ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : str , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Union[str, Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Optional[int] ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Any = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Dict ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Dict , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : Tuple , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[str] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Any = ['torch']
def __init__( self : List[str] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Dict ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[Any] ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : str ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : Any , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[int] = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : Any , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : str ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[Any] = ['torch']
def __init__( self : Any , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Tuple , **lowerCamelCase__ : Tuple ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : List[Any] = ['torch']
def __init__( self : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : str ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : List[Any] = ['torch']
def __init__( self : Tuple , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : int = ['torch']
def __init__( self : Any , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Tuple ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : Any , **lowerCamelCase__ : Tuple ) ->List[str]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : Any , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Optional[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Dict ) ->int:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : List[Any] = ['torch']
def __init__( self : Union[str, Any] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[Any] ) ->int:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : Tuple ) ->int:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : int , **lowerCamelCase__ : List[Any] ) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Union[str, Any] = ['torch']
def __init__( self : List[str] , *lowerCamelCase__ : str , **lowerCamelCase__ : str ) ->str:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : List[str] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : str , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Tuple ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Union[str, Any] = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : int , **lowerCamelCase__ : List[str] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Union[str, Any] ) ->int:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : List[Any] = ['torch']
def __init__( self : str , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Optional[int] ) ->Any:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : int ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Dict = ['torch']
def __init__( self : List[str] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : int ) ->Tuple:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Dict ) ->Any:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : int , **lowerCamelCase__ : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : str = ['torch']
def __init__( self : List[Any] , *lowerCamelCase__ : Dict , **lowerCamelCase__ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Dict , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Union[str, Any] ) ->int:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Union[str, Any] = ['torch']
def __init__( self : Optional[Any] , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Tuple ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *lowerCamelCase__ : int , **lowerCamelCase__ : Any ) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[str] , *lowerCamelCase__ : str , **lowerCamelCase__ : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Tuple = ['torch']
def __init__( self : int , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : List[str] ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : str , **lowerCamelCase__ : List[str] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : Optional[Any] = ['torch']
def __init__( self : Optional[int] , *lowerCamelCase__ : str , **lowerCamelCase__ : Any ) ->Dict:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : int , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Tuple ) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Tuple , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["torch"] )
class lowerCAmelCase__ ( metaclass=__snake_case ):
lowerCAmelCase : List[Any] = ['torch']
def __init__( self : int , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : Any ) ->str:
'''simple docstring'''
requires_backends(cls , ["torch"] )
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : List[Any] ) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["torch"] )
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser ):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary(terminalreporter ):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
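# Example: `pytest --make-reports=tests_torch tests/` makes the shared helper
# write its summary reports (failures, durations, ...) under a reports/ directory
# named after the given id.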
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]
    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        '''simple docstring'''
        super().__init__()
        self.prefix_length = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""")
        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim
        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )
        gpt_config = GPT2Config(
            vocab_size=vocab_size, n_positions=n_positions, n_embd=n_embd, n_layer=n_layer, n_head=n_head, n_inner=n_inner, activation_function=activation_function, resid_pdrop=resid_pdrop, embd_pdrop=embd_pdrop, attn_pdrop=attn_pdrop, layer_norm_epsilon=layer_norm_epsilon, initializer_range=initializer_range, scale_attn_weights=scale_attn_weights, use_cache=use_cache, scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx, reorder_and_upcast_attn=reorder_and_upcast_attn, )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(self, input_ids, prefix_embeds, attention_mask=None, labels=None):
        '''simple docstring'''
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out
    def get_dummy_token(self, batch_size, device) -> torch.Tensor:
        '''simple docstring'''
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)
    def encode(self, prefix):
        '''simple docstring'''
        return self.encode_prefix(prefix)
@torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        '''simple docstring'''
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id)
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths
@torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        '''simple docstring'''
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)
        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)
        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()
            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]
            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break
        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
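# Hedged standalone sketch (added for illustration, not part of the original file): the
# ranking rule used inside `generate_beam` above, i.e. dividing cumulative log-probabilities
# by sequence lengths before sorting, shown in isolation. All numbers are made up.
def _demo_length_normalized_ranking():
    scores = torch.tensor([-4.0, -3.0, -4.5])    # cumulative log-probs of three beams
    seq_lengths = torch.tensor([4.0, 2.0, 5.0])  # lengths of those beams
    normalized = scores / seq_lengths            # per-token average: [-1.0, -1.5, -0.9]
    order = normalized.argsort(descending=True)  # best beam first -> tensor([2, 0, 1])
    return order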
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
@require_flax
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(F"""can't find {path}""")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        '''simple docstring'''
        import xla_spawn
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
        self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        '''simple docstring'''
        import xla_spawn
        testargs = "\n    ./tests/test_trainer_tpu.py\n    --num_cores=8\n    ./tests/test_trainer_tpu.py\n    ".split()
        with patch.object(sys, "argv", testargs):
xla_spawn.main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
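# Hedged usage note (added for illustration; the package path is inferred from this file's
# contents): with the `_LazyModule` indirection above, an import such as
#   from transformers.models.instructblip import InstructBlipProcessor
# only loads the heavy submodule on first attribute access, keeping top-level imports cheap.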
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
def solution():
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
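# Hedged illustration (added; the two 50-digit values are assumptions standing in for the
# contents of num.txt): the same "first ten digits of a large sum" technique, inline.
def _demo_first_ten_digits():
    sample_numbers = [
        37107287533902102798797998220837590246510135740250,
        46376937677490009712648124896970078050417018260538,
    ]
    return str(sum(sample_numbers))[:10]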
if __name__ == "__main__":
print(solution())
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_rust_tokenizer(self, **kwargs):
        '''simple docstring'''
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        '''simple docstring'''
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        '''simple docstring'''
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")
    @cached_property
    def default_tokenizer_fast(self):
        '''simple docstring'''
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        '''simple docstring'''
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        '''simple docstring'''
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        '''simple docstring'''
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        '''simple docstring'''
        pass
    def test_embeded_special_tokens(self):
        '''simple docstring'''
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), )
                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"])
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class TokenizedDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        '''simple docstring'''
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies
    def __iter__(self):
        '''simple docstring'''
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    def __init__(self, start_length, eof_strings, tokenizer):
        '''simple docstring'''
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer
    def __call__(self, input_ids, scores, **kwargs):
        '''simple docstring'''
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs)
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id)
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()
            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)
    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()
    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()
    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)
    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }
    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")
    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size
    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            "Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL=\"1\"`"
            " flag to enable code evaluation.")
        raise exception
    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)
    code_gens = complete_code(
        accelerator, model, tokenizer, human_eval_loader, n_tasks=n_tasks, batch_size=args.batch_size, **gen_kwargs, )
    if accelerator.is_main_process:
        references = []
        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = F"""check({human_eval['test'][task]['entry_point']})"""
            references.append("\n" + test_func + "\n" + entry_point)
        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers)
        print(F"""Results: {pass_at_k}""")
        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
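# Hedged example invocation (added for illustration; exact flag spellings depend on the
# HumanEvalArguments dataclass, so treat these as assumptions):
#   accelerate launch human_eval.py --model_ckpt codeparrot/codeparrot --num_tasks 10 \
#       --n_samples 20 --batch_size 10 --HF_ALLOW_CODE_EVAL 1 --output_file eval_results.json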
'''simple docstring'''
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=64,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def get_large_model_config(self):
        '''simple docstring'''
        return MPNetConfig.from_pretrained("microsoft/mpnet-base")
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
    def create_and_check_mpnet_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MPNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
    def create_and_check_mpnet_for_question_answering(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        model = MPNetForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_mpnet_for_sequence_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MPNetForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_mpnet_for_multiple_choice(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MPNetForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_mpnet_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MPNetForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MPNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MPNetForMaskedLM,
            MPNetForMultipleChoice,
            MPNetForQuestionAnswering,
            MPNetForSequenceClassification,
            MPNetForTokenClassification,
            MPNetModel,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MPNetModel,
            "fill-mask": MPNetForMaskedLM,
            "question-answering": MPNetForQuestionAnswering,
            "text-classification": MPNetForSequenceClassification,
            "token-classification": MPNetForTokenClassification,
            "zero-shot": MPNetForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = True
    def setUp(self):
        '''simple docstring'''
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mpnet_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_model(*config_and_inputs)
    def test_for_sequence_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs)
    def test_for_multiple_choice(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs)
    def test_for_token_classification(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs)
@require_torch
class MPNetModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        '''simple docstring'''
        model = MPNetModel.from_pretrained("microsoft/mpnet-base")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
        # compare the actual values for a slice.
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1E-4))
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
'''simple docstring'''
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = 'Usage of script: script_name <size_of_canvas:int>'
choice = [0] * 100 + [1] * 10
random.shuffle(choice)
def create_canvas(size):
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas
def seed(canvas):
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))
def run(canvas):
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2])
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas = current_canvas.tolist()
    return return_canvas
def __judge_point(pt, neighbours):
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state
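# Hedged self-check (added for illustration; the 3x3 neighbourhoods are made up): the
# classic Conway rules as exercised by __judge_point above.
def _demo_judge_point():
    block = [[True, True, False], [True, True, False], [False, False, False]]
    assert __judge_point(True, block) is True  # live cell with 3 live neighbours survives
    lonely = [[False, False, False], [False, True, False], [False, False, False]]
    assert __judge_point(True, lonely) is False  # live cell with no live neighbours dies
    birth = [[True, True, True], [False, False, False], [False, False, False]]
    assert __judge_point(False, birth) is True  # dead cell with exactly 3 live neighbours is born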
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
lowerCamelCase__ = int(sys.argv[1])
# main working structure of this module.
lowerCamelCase__ = create_canvas(canvas_size)
seed(c)
lowerCamelCase__ ,lowerCamelCase__ = plt.subplots()
fig.show()
lowerCamelCase__ = ListedColormap(['w', 'k'])
try:
while True:
lowerCamelCase__ = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
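# Hedged illustration (added; `fake_version` is invented): the version gate above in
# isolation -- any huggingface_hub release older than 0.11.0 needs the manual quoting.
def _demo_needs_manual_quoting(fake_version: str = "0.10.2") -> bool:
    return version.parse(fake_version).release < version.parse("0.11.0").release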
'''simple docstring'''
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XLMRobertaTokenizer, XLMRobertaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class XLMRobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMRobertaTokenizer
    rust_tokenizer_class = XLMRobertaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1002)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.get_tokenizer().vocab_size, 1002)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = XLMRobertaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        '''simple docstring'''
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-xlm-roberta", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)
                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)
                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                shutil.rmtree(tmpdirname2)
@cached_property
    def big_tokenizer(self):
        '''simple docstring'''
        return XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
    def test_picklable_without_disk(self):
        '''simple docstring'''
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XLMRobertaTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
            pickle.loads(pickled_tokenizer)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenization_base_easy_symbols(self):
        '''simple docstring'''
        symbols = "Hello World!"
        original_tokenizer_encodings = [0, 35378, 6661, 38, 2]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenization_base_hard_symbols(self):
        '''simple docstring'''
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
0,
32_93,
83,
10,
45_52,
49_89,
79_86,
6_78,
10,
59_15,
1_11,
17_94_59,
12_48_50,
4,
60_44,
2_37,
12,
6,
5,
6,
4,
67_80,
7_05,
15,
13_88,
44,
3_78,
1_01_14,
7_11,
1_52,
20,
6,
5,
2_23_76,
6_42,
12_21,
1_51_90,
3_41_53,
4_50,
56_08,
9_59,
11_19,
5_77_02,
1_36,
1_86,
47,
10_98,
2_93_67,
47,
# 4426, # What fairseq tokenizes from "<unk>": "_<"
# 3678, # What fairseq tokenizes from "<unk>": "unk"
# 2740, # What fairseq tokenizes from "<unk>": ">"
3, # What we tokenize from "<unk>": "<unk>"
6, # Residue from the tokenization: an extra sentencepiece underline
4,
60_44,
2_37,
62_84,
5_09_01,
5_28,
31,
90,
34,
9_27,
2,
]
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base') # xlmr.large has same tokenizer
        # xlmr.eval()
        # xlmr.encode(symbols)
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
_UpperCAmelCase : Any = {"input_ids": [[0, 1_10_62, 8_27_72, 7, 15, 8_27_72, 5_38, 5_15_29, 2_37, 1_71_98, 12_90, 2_06, 9, 21_51_75, 13_14, 1_36, 1_71_98, 12_90, 2_06, 9, 5_63_59, 42, 12_20_09, 9, 1_64_66, 16, 8_73_44, 45_37, 9, 47_17, 7_83_81, 6, 15_99_58, 7, 15, 2_44_80, 6_18, 4, 5_27, 2_26_93, 54_28, 4, 27_77, 2_44_80, 98_74, 4, 4_35_23, 5_94, 4, 8_03, 1_83_92, 3_31_89, 18, 4, 4_35_23, 2_44_47, 1_23_99, 1_00, 2_49_55, 8_36_58, 96_26, 14_40_57, 15, 8_39, 2_23_35, 16, 1_36, 2_49_55, 8_36_58, 8_34_79, 15, 3_91_02, 7_24, 16, 6_78, 6_45, 27_89, 13_28, 45_89, 42, 12_20_09, 11_57_74, 23, 8_05, 13_28, 4_68_76, 7, 1_36, 5_38_94, 19_40, 4_22_27, 4_11_59, 1_77_21, 8_23, 4_25, 4, 2_75_12, 9_87_22, 2_06, 1_36, 55_31, 49_70, 9_19, 1_73_36, 5, 2], [0, 2_00_80, 6_18, 83, 8_27_75, 47, 4_79, 9, 15_17, 73, 5_38_94, 3_33, 8_05_81, 11_01_17, 1_88_11, 52_56, 12_95, 51, 15_25_26, 2_97, 79_86, 3_90, 12_44_16, 5_38, 3_54_31, 2_14, 98, 1_50_44, 2_57_37, 1_36, 71_08, 4_37_01, 23, 7_56, 13_53_55, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 5_81, 6_37_73, 11_94_55, 6, 14_77_97, 8_82_03, 7, 6_45, 70, 21, 32_85, 1_02_69, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        expected_encoding = _UpperCAmelCase  # rebind the fixture dict above to a readable name
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="xlm-roberta-base",
            revision="d9d8a8ea5eb94b1c6654ae9249df7793cd2933d3",
        )
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        '''simple docstring'''
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        '''simple docstring'''
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")
        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)
    def _init_weights(self, module):
        '''simple docstring'''
        pass
    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None
        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output
        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
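if __name__ == "__main__":
    # Hedged usage sketch (added for illustration; not part of the original module).
    # Assumes torch and timm are installed and that "resnet18" is a valid timm model
    # name; the input shape is an arbitrary example.
    import torch

    config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
    backbone = TimmBackbone(config)
    outputs = backbone(torch.randn(1, 3, 224, 224))
    print([tuple(fm.shape) for fm in outputs.feature_maps])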
| 40
| 0
|
class MaxFenwickTree:
    """
    Fenwick tree (binary indexed tree) specialised for point updates and
    range-maximum queries over the half-open interval [left, right).
    """

    def __init__(self, size: int) -> None:
        self.size = size
        self.arr = [0] * size
        self.tree = [0] * size

    @staticmethod
    def get_next(index: int) -> int:
        return index | (index + 1)

    @staticmethod
    def get_prev(index: int) -> int:
        return (index & (index + 1)) - 1

    def update(self, index: int, value: int) -> None:
        self.arr[index] = value
        while index < self.size:
            current_left_border = self.get_prev(index) + 1
            if current_left_border == index:
                self.tree[index] = value
            else:
                # recompute the node from the sub-range it covers
                self.tree[index] = max(value, self.query(current_left_border, index))
            index = self.get_next(index)

    def query(self, left: int, right: int) -> int:
        right -= 1  # because right is exclusive
        result = 0
        while left <= right:
            current_left = self.get_prev(right)
            if left <= current_left:
                result = max(result, self.tree[right])
                right = current_left
            else:
                result = max(result, self.arr[right])
                right -= 1
        return result
if __name__ == "__main__":
import doctest
doctest.testmod()
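    # Hedged demo (added for illustration): point updates followed by
    # range-max queries over the half-open interval [left, right).
    fenwick = MaxFenwickTree(5)
    fenwick.update(2, 10)
    fenwick.update(4, 7)
    assert fenwick.query(0, 5) == 10
    assert fenwick.query(3, 5) == 7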
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mra'] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
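# Illustrative note (added): with this pattern, `import transformers.models.mra`
# stays cheap; the heavy `modeling_mra` module is only imported the first time an
# attribute such as `MraModel` is actually accessed on the package.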
| 40
| 0
|
'''simple docstring'''
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
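    # Hedged demo (added for illustration): 360 = 2**3 * 3**2 * 5.
    assert prime_factors(360) == [2, 2, 2, 3, 3, 5]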
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs(self):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        '''simple docstring'''
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        '''simple docstring'''
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        '''simple docstring'''
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_maskaformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        '''simple docstring'''
        with torch.no_grad():
            model = MaskaFormerModel(config=config)
            model.to(torch_device)
            model.eval()
            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)
        if output_hidden_states:
            self.check_output_hidden_state(output, config)
    def create_and_check_maskaformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)
            comm_check_on_output(result)
            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )
            comm_check_on_output(result)
            self.parent.assertTrue(result.loss is not None)
            self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskaFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        '''simple docstring'''
        self.model_tester = MaskaFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskaFormerConfig, has_text_modality=False)

    def test_config(self):
        '''simple docstring'''
        self.config_tester.run_common_tests()

    def test_maskaformer_model(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskaformer_instance_segmentation_head_model(self):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
    def test_forward_signature(self):
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @slow
    def test_model_from_pretrained(self):
        '''simple docstring'''
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = MaskaFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_model_with_labels(self):
        '''simple docstring'''
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()
        model = MaskaFormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)
    def test_maskaformer_model_output_hidden_states(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        '''simple docstring'''
        if not self.model_tester.is_training:
            return
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        model = model_class(config)
        model.to(torch_device)
        model.train()
        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()
    def test_retain_grad_hidden_states_attentions(self):
        '''simple docstring'''
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True
        model = model_class(config).to(torch_device)
        model.train()
        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)
        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        attentions = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskaFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        '''simple docstring'''
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        '''simple docstring'''
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
    def test_inference_no_head(self):
        '''simple docstring'''
        model = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE)
        )
    def test_inference_universal_segmentation_head(self):
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))
    def test_with_segmentation_maps_and_loss(self):
        '''simple docstring'''
        model = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )
        pixel_values = inputs["pixel_values"].to(torch_device)
        mask_labels = [el.to(torch_device) for el in inputs["mask_labels"]]
        class_labels = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        self.assertTrue(outputs.loss is not None)
| 40
| 0
|
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others and second time in a console-friendly format, so it's easier to use for tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (as it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a train:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")


class Tee:
    """
    A helper class to tee print's output into a file.
    Usage: sys.stdout = Tee(filename)
    """

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
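# Hedged example (added for illustration): for a hypothetical
# args.base_cmd == "run.py --output_dir old \\\n --fp16", the function returns roughly
# [sys.executable, "run.py", "--fp16", "--output_dir", "<output_dir>", "--overwrite_output_dir"].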
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)
    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(results) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory / 2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def bamb(x):
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(accelerator, batch_size=16, model_name="bert-base-cased", n_train=320, n_val=160):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + bamb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + bamb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
from PIL import Image
def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change brightness to 100
        brigt_img = change_brightness(img, 100)
brigt_img.save('image_data/lena_brightness.png', format='png')
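        # Hedged note (added): with level=100 the mapping is c -> c + 100, so a
        # mid-gray pixel value of 128 becomes 228; PIL clamps results to [0, 255].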
| 705
|
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")
        if name != "head.weight":
            name = "rwkv." + name
        state_dict[name] = weight
    return state_dict
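# Hedged sanity check (added for illustration): a raw RWKV key such as
# "blocks.2.att.time_mix_k" is remapped by the rules above to
# "rwkv.blocks.2.attention.time_mix_key", while "head.weight" is left unprefixed.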
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=__lowerCAmelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error; if this is the case, don't worry, you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, the default GPT-NeoX-20B tokenizer is used).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
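# Example invocation (repo and file names below are illustrative, not verified):
#   python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth --output_dir ./rwkv-169m-hf --size 169M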
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return params[F"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase="attention" ):
_UpperCAmelCase : List[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_UpperCAmelCase : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_UpperCAmelCase : Optional[Any] = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_UpperCAmelCase : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_UpperCAmelCase : str = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_UpperCAmelCase : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_UpperCAmelCase : Dict = np.ascontiguousarray(params[F"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_UpperCAmelCase : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
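# Shape sketch (dimension names are assumptions inferred from the slicing above):
# the k/q/v kernels are stored as (d_model, num_layers, num_heads, head_dim);
# taking layer i and flattening the last two axes gives the (d_model, num_heads * head_dim)
# weight, while the out kernel (num_heads, head_dim, d_model) collapses its first
# two axes into (num_heads * head_dim, d_model).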
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ):
if split_mlp_wi:
_UpperCAmelCase : Dict = params[F"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_UpperCAmelCase : Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_UpperCAmelCase : Union[str, Any] = (wi_a, wi_a)
else:
_UpperCAmelCase : Optional[Any] = params[F"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_UpperCAmelCase : int = params[F"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
return params[F"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCAmelCase (__lowerCAmelCase , *, __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False ):
_UpperCAmelCase : int = traverse_util.flatten_dict(variables["target"] )
_UpperCAmelCase : str = {"/".join(lowercase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_UpperCAmelCase : Union[str, Any] = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , lowercase_ )
_UpperCAmelCase : List[Any] = collections.OrderedDict()
# Shared embeddings.
_UpperCAmelCase : str = old["token_embedder/embedding"]
# Encoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
_UpperCAmelCase : int = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_attention_layer_norm" )
_UpperCAmelCase : str = tax_attention_lookup(lowercase_ , lowercase_ , "encoder" , "attention" )
_UpperCAmelCase : List[Any] = layer_norm
_UpperCAmelCase : str = k.T
_UpperCAmelCase : Optional[Any] = o.T
_UpperCAmelCase : List[str] = q.T
_UpperCAmelCase : int = v.T
# Block i, layer 1 (MLP).
_UpperCAmelCase : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_mlp_layer_norm" )
_UpperCAmelCase : int = tax_mlp_lookup(lowercase_ , lowercase_ , "encoder" , lowercase_ )
_UpperCAmelCase : int = layer_norm
if split_mlp_wi:
_UpperCAmelCase : Optional[int] = wi[0].T
_UpperCAmelCase : Optional[Any] = wi[1].T
else:
_UpperCAmelCase : Any = wi.T
_UpperCAmelCase : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCAmelCase : Optional[int] = tax_relpos_bias_lookup(
lowercase_ , lowercase_ , "encoder" ).T
_UpperCAmelCase : str = old["encoder/encoder_norm/scale"]
if not scalable_attention:
_UpperCAmelCase : Any = tax_relpos_bias_lookup(
lowercase_ , 0 , "encoder" ).T
_UpperCAmelCase : str = tax_relpos_bias_lookup(
lowercase_ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
_UpperCAmelCase : List[Any] = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_self_attention_layer_norm" )
_UpperCAmelCase : Any = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "self_attention" )
_UpperCAmelCase : List[str] = layer_norm
_UpperCAmelCase : Union[str, Any] = k.T
_UpperCAmelCase : str = o.T
_UpperCAmelCase : List[str] = q.T
_UpperCAmelCase : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
_UpperCAmelCase : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_cross_attention_layer_norm" )
_UpperCAmelCase : int = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "encoder_decoder_attention" )
_UpperCAmelCase : List[str] = layer_norm
_UpperCAmelCase : str = k.T
_UpperCAmelCase : List[Any] = o.T
_UpperCAmelCase : int = q.T
_UpperCAmelCase : List[Any] = v.T
# Block i, layer 2 (MLP).
_UpperCAmelCase : Tuple = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_mlp_layer_norm" )
_UpperCAmelCase : Tuple = tax_mlp_lookup(lowercase_ , lowercase_ , "decoder" , lowercase_ )
_UpperCAmelCase : Optional[int] = layer_norm
if split_mlp_wi:
_UpperCAmelCase : List[Any] = wi[0].T
_UpperCAmelCase : Optional[int] = wi[1].T
else:
_UpperCAmelCase : str = wi.T
_UpperCAmelCase : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_UpperCAmelCase : Optional[int] = tax_relpos_bias_lookup(lowercase_ , lowercase_ , "decoder" ).T
_UpperCAmelCase : Tuple = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_UpperCAmelCase : Any = old["decoder/logits_dense/kernel"].T
return new
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_UpperCAmelCase : str = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_UpperCAmelCase : Optional[int] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
_UpperCAmelCase : int = state_dict["shared.weight"]
return state_dict
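# Note (inferred from the fallbacks above): the encoder/decoder token embeddings
# and, for v1.0 checkpoints, the LM head are all tied to the shared "shared.weight" matrix.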
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = checkpoints.load_tax_checkpoint(lowercase_ )
_UpperCAmelCase : List[str] = convert_tax_to_pytorch(
lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ , scalable_attention=lowercase_ )
_UpperCAmelCase : Union[str, Any] = make_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ , strict=lowercase_ )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = False , ):
_UpperCAmelCase : int = MTaConfig.from_json_file(lowercase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_UpperCAmelCase : Optional[int] = UMTaEncoderModel(lowercase_ )
else:
_UpperCAmelCase : Optional[int] = UMTaForConditionalGeneration(lowercase_ )
# Load weights from the T5X checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowercase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_ )
print("Done" )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Whether the model is an encoder-only model.', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scalable attention (UMT5 models)',
default=False,
)
lowerCamelCase__ = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def __lowerCAmelCase ():
_UpperCAmelCase : int = argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=__UpperCamelCase , default=__UpperCamelCase , required=__UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=__UpperCamelCase , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=__UpperCamelCase , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=__UpperCamelCase , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=__UpperCamelCase , default=0 , help="cuda_id." , )
_UpperCAmelCase : Tuple = parser.parse_args()
return args
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
if not len(__UpperCamelCase ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
_UpperCAmelCase , _UpperCAmelCase : int = imgs[0].size
_UpperCAmelCase : Dict = Image.new("RGB" , size=(cols * w, rows * h) )
_UpperCAmelCase , _UpperCAmelCase : List[str] = grid.size
for i, img in enumerate(__UpperCamelCase ):
grid.paste(__UpperCamelCase , box=(i % cols * w, i // cols * h) )
return grid
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase="robotic cat with wings" , __lowerCAmelCase=7.5 , __lowerCAmelCase=50 , __lowerCAmelCase=1 , __lowerCAmelCase=42 , ):
_UpperCAmelCase : str = torch.Generator(pipeline.device ).manual_seed(__UpperCamelCase )
_UpperCAmelCase : Optional[int] = pipeline(
__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=__UpperCamelCase , generator=__UpperCamelCase , num_images_per_prompt=__UpperCamelCase , ).images
_UpperCAmelCase : Optional[int] = int(math.sqrt(__UpperCamelCase ) )
_UpperCAmelCase : List[str] = image_grid(__UpperCamelCase , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
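# With the defaults used below (images_num=4), _rows = int(math.sqrt(4)) = 2,
# so the saved grid is laid out as 2 rows x 2 columns.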
lowerCamelCase__ = parse_args()
# Load models and create wrapper for stable diffusion
lowerCamelCase__ = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
lowerCamelCase__ = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
lowerCamelCase__ = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
lowerCamelCase__ = UNetaDConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
lowerCamelCase__ = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
lowerCamelCase__ = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
lowerCamelCase__ = load(args.pretrained_model_name_or_path, model=unet)
unet.eval()
setattr(pipeline, 'unet', unet)
else:
lowerCamelCase__ = unet.to(torch.device('cuda', args.cuda_id))
lowerCamelCase__ = pipeline.to(unet.device)
lowerCamelCase__ ,lowerCamelCase__ = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
lowerCamelCase__ = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
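# Worked example (use_ema_warmup=False): at optimization step 91 the effective
# step is 91 - 0 - 1 = 90, the raw value is (1 + 90) / (10 + 90) = 0.91, and the
# result is then clamped into [min_decay, decay], so the EMA decay warms up from
# ~0 toward the configured maximum (0.9999 by default) as training progresses.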
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
_UpperCAmelCase : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ , modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
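# The in-place update above is the standard EMA rule
# shadow = decay * shadow + (1 - decay) * param, rewritten as
# shadow -= (1 - decay) * (shadow - param), which is algebraically identical.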
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
import math
class lowerCAmelCase__ :
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : list[list[float]] , lowerCamelCase__ : list[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = 0.0
_UpperCAmelCase : List[Any] = 0.0
for i in range(len(_lowerCAmelCase ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
db += math.pow((sample[i] - weights[1][i]) , 2 )
# da and db are the squared distances to cluster 0 and cluster 1 respectively
return 0 if da > db else 1
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : list[list[int | float]] , lowerCamelCase__ : list[int] , lowerCamelCase__ : int , lowerCamelCase__ : float ) ->list[list[int | float]]:
'''simple docstring'''
for i in range(len(_lowerCAmelCase ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
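# Update rule sketch: each component of the winning weight vector moves a
# fraction alpha toward the sample; e.g. with alpha=0.5, weight 0.2 and
# sample 1.0, the new weight is 0.2 + 0.5 * (1.0 - 0.2) = 0.6.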
def __lowerCAmelCase ():
_UpperCAmelCase : str = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_UpperCAmelCase : Optional[int] = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_UpperCAmelCase : List[Any] = SelfOrganizingMap()
_UpperCAmelCase : Union[str, Any] = 3
_UpperCAmelCase : List[Any] = 0.5
for _ in range(__lowerCAmelCase ):
for j in range(len(__lowerCAmelCase ) ):
# training sample
_UpperCAmelCase : Optional[Any] = training_samples[j]
# Compute the winning vector
_UpperCAmelCase : int = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# Update the winning vector
_UpperCAmelCase : List[Any] = self_organizing_map.update(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# classify test sample
_UpperCAmelCase : Tuple = [0, 0, 0, 1]
_UpperCAmelCase : Dict = self_organizing_map.get_winner(__lowerCAmelCase , __lowerCAmelCase )
# results
print(F"""Clusters that the test sample belongs to : {winner}""" )
print(F"""Weights that have been trained : {weights}""" )
# running the main() function
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extract some layers of the full BertForMaskedLM or RobertaForMaskedLM for Transfer Learning'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
'''simple docstring'''
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase__ ( UpperCamelCase__ , unittest.TestCase ):
lowerCAmelCase : List[Any] = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : Union[str, Any]=0 ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Dict = np.random.RandomState(_a )
_UpperCAmelCase : Any = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : int = self.get_dummy_inputs()
_UpperCAmelCase : int = pipe(**_a ).images
_UpperCAmelCase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : Any = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
_UpperCAmelCase : List[str] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
_UpperCAmelCase : Tuple = pipe(**_a ).images
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
_UpperCAmelCase : List[str] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Tuple = self.get_dummy_inputs()
_UpperCAmelCase : Any = pipe(**_a ).images
_UpperCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
_UpperCAmelCase : Optional[Any] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
_UpperCAmelCase : int = pipe(**_a ).images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : Any = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
_UpperCAmelCase : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Any = self.get_dummy_inputs()
_UpperCAmelCase : int = pipe(**_a ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : Tuple = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
_UpperCAmelCase : Any = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : int = self.get_dummy_inputs()
_UpperCAmelCase : int = pipe(**_a ).images
_UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
_UpperCAmelCase : Optional[int] = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : str = self.get_dummy_inputs()
_UpperCAmelCase : List[str] = 3 * [inputs["""prompt"""]]
# forward
_UpperCAmelCase : Any = pipe(**_a )
_UpperCAmelCase : Any = output.images[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
_UpperCAmelCase : Optional[Any] = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : Dict = pipe.tokenizer(
_a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , )
_UpperCAmelCase : Union[str, Any] = text_inputs["""input_ids"""]
_UpperCAmelCase : Union[str, Any] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
_UpperCAmelCase : int = prompt_embeds
# forward
_UpperCAmelCase : Dict = pipe(**_a )
_UpperCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def lowerCAmelCase__ ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
_UpperCAmelCase : Dict = 3 * ["""this is a negative prompt"""]
_UpperCAmelCase : List[Any] = negative_prompt
_UpperCAmelCase : int = 3 * [inputs["""prompt"""]]
# forward
_UpperCAmelCase : str = pipe(**_a )
_UpperCAmelCase : Dict = output.images[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs()
_UpperCAmelCase : List[Any] = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : Tuple = []
for p in [prompt, negative_prompt]:
_UpperCAmelCase : str = pipe.tokenizer(
_a , padding="max_length" , max_length=pipe.tokenizer.model_max_length , truncation=_a , return_tensors="np" , )
_UpperCAmelCase : Union[str, Any] = text_inputs["""input_ids"""]
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
_UpperCAmelCase : List[Any] = embeds
# forward
_UpperCAmelCase : Tuple = pipe(**_a )
_UpperCAmelCase : Optional[Any] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
@property
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : Any = ort.SessionOptions()
_UpperCAmelCase : int = False
return options
def lowerCAmelCase__ ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[Any] = """A painting of a squirrel eating a burger"""
np.random.seed(0 )
_UpperCAmelCase : Union[str, Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type="np" )
_UpperCAmelCase : Union[str, Any] = output.images
_UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : int = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = DDIMScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
_UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : Optional[int] = """open neural network exchange"""
_UpperCAmelCase : Optional[Any] = np.random.RandomState(0 )
_UpperCAmelCase : Any = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" )
_UpperCAmelCase : Dict = output.images
_UpperCAmelCase : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
_UpperCAmelCase : str = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : str = """open neural network exchange"""
_UpperCAmelCase : int = np.random.RandomState(0 )
_UpperCAmelCase : Union[str, Any] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="np" )
_UpperCAmelCase : List[Any] = output.images
_UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase : Union[str, Any] = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = 0
def test_callback_fn(lowerCamelCase__ : Dict , lowerCamelCase__ : Union[str, Any] , lowerCamelCase__ : Dict ) -> None:
_UpperCAmelCase : List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase : Optional[int] = latents[0, -3:, -3:, -1]
_UpperCAmelCase : str = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
_UpperCAmelCase : Union[str, Any] = latents[0, -3:, -3:, -1]
_UpperCAmelCase : Tuple = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : Any = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=_a )
_UpperCAmelCase : List[Any] = """Andromeda galaxy in a bottle"""
_UpperCAmelCase : Optional[int] = np.random.RandomState(0 )
pipe(
prompt=_a , num_inference_steps=5 , guidance_scale=7.5 , generator=_a , callback=_a , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = OnnxStableDiffusionPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(_a , _a )
assert pipe.safety_checker is None
_UpperCAmelCase : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
_UpperCAmelCase : List[Any] = OnnxStableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_UpperCAmelCase : Union[str, Any] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
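# Expected output for source vertex 'G' (the last call raises ValueError):
#   G->C->A->B->D
#   G
#   ValueError: No path from vertex: G to vertex: Foo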
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
@require_torch
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_UpperCAmelCase : int = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_UpperCAmelCase : Dict = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_UpperCAmelCase : Any = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="fill-mask" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_UpperCAmelCase : List[Any] = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase : Any = '''1'''
_UpperCAmelCase : List[str] = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : Union[str, Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase : str = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
_UpperCAmelCase : List[str] = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
_UpperCAmelCase : str = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
_UpperCAmelCase : Any = '''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCamelCase__ )
BertModel.from_pretrained(UpperCamelCase__ )
BertTokenizer.from_pretrained(UpperCamelCase__ )
pipeline(task="fill-mask" , model=UpperCamelCase__ )
# baseline - just load from_pretrained with normal network
_UpperCAmelCase : int = [sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
_UpperCAmelCase : Optional[int] = self.get_env()
_UpperCAmelCase : int = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[str] = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
_UpperCAmelCase : Union[str, Any] = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
_UpperCAmelCase : Dict = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
_UpperCAmelCase : int = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_UpperCAmelCase : Dict = self.get_env()
_UpperCAmelCase : Optional[int] = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# next emulate no network
_UpperCAmelCase : List[Any] = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase : List[Any] = '''1'''
_UpperCAmelCase : Dict = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
@require_torch
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = '''
from transformers import pipeline
'''
_UpperCAmelCase : str = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
_UpperCAmelCase : Optional[int] = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
_UpperCAmelCase : int = self.get_env()
_UpperCAmelCase : Optional[int] = '''1'''
_UpperCAmelCase : int = [sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
_UpperCAmelCase : Dict = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" , result.stderr.decode().replace("\n" , "" ) , )
@require_torch
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : str = '''
from transformers import AutoModel
'''
_UpperCAmelCase : List[Any] = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
_UpperCAmelCase : int = [sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
_UpperCAmelCase : Tuple = self.get_env()
_UpperCAmelCase : str = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_UpperCAmelCase : Any = '''1'''
_UpperCAmelCase : Any = subprocess.run(UpperCamelCase__ , env=UpperCamelCase__ , check=UpperCamelCase__ , capture_output=UpperCamelCase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("success" , result.stdout.decode() )
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(F"""Unsupported alpha_transform_type: {alpha_transform_type}""")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
@register_to_config
    def __init__( self , num_train_timesteps: int = 1000 , beta_start: float = 0.00085 , beta_end: float = 0.012 , beta_schedule: str = "linear" , trained_betas: Optional[Union[np.ndarray, List[float]]] = None , prediction_type: str = "epsilon" , use_karras_sigmas: Optional[bool] = False , clip_sample: Optional[bool] = False , clip_sample_range: float = 1.0 , timestep_spacing: str = "linspace" , steps_offset: int = 0 , ) ->None:
'''simple docstring'''
if trained_betas is not None:
_UpperCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
_UpperCAmelCase : Dict = torch.linspace(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
_UpperCAmelCase : Optional[Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
_UpperCAmelCase : Optional[Any] = betas_for_alpha_bar(_lowerCamelCase , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
_UpperCAmelCase : str = betas_for_alpha_bar(_lowerCamelCase , alpha_transform_type="exp" )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
_UpperCAmelCase : List[Any] = 1.0 - self.betas
_UpperCAmelCase : Optional[int] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase : Dict = use_karras_sigmas
    def index_for_timestep(self, timestep, schedule_timesteps=None):
'''simple docstring'''
if schedule_timesteps is None:
_UpperCAmelCase : Any = self.timesteps
_UpperCAmelCase : int = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
_UpperCAmelCase : Union[str, Any] = 1 if len(_lowerCamelCase ) > 1 else 0
else:
_UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(_lowerCamelCase ) else timestep
_UpperCAmelCase : int = self._index_counter[timestep_int]
return indices[pos].item()
@property
    def init_noise_sigma(self):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]) ->torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase : Any = self.index_for_timestep(_lowerCamelCase )
_UpperCAmelCase : Tuple = self.sigmas[step_index]
_UpperCAmelCase : Dict = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None, num_train_timesteps: Optional[int] = None):
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = num_inference_steps
_UpperCAmelCase : List[str] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
_UpperCAmelCase : str = np.linspace(0 , num_train_timesteps - 1 , _lowerCamelCase , dtype=_lowerCamelCase )[::-1].copy()
elif self.config.timestep_spacing == "leading":
_UpperCAmelCase : int = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : Tuple = (np.arange(0 , _lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(_lowerCamelCase )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
_UpperCAmelCase : Dict = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
_UpperCAmelCase : int = (np.arange(_lowerCamelCase , 0 , -step_ratio )).round().copy().astype(_lowerCamelCase )
timesteps -= 1
else:
raise ValueError(
F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
_UpperCAmelCase : Dict = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
_UpperCAmelCase : List[Any] = np.log(_lowerCamelCase )
_UpperCAmelCase : int = np.interp(_lowerCamelCase , np.arange(0 , len(_lowerCamelCase ) ) , _lowerCamelCase )
if self.config.use_karras_sigmas:
_UpperCAmelCase : Dict = self._convert_to_karras(in_sigmas=_lowerCamelCase , num_inference_steps=self.num_inference_steps )
_UpperCAmelCase : Tuple = np.array([self._sigma_to_t(_lowerCamelCase , _lowerCamelCase ) for sigma in sigmas] )
_UpperCAmelCase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
_UpperCAmelCase : Optional[Any] = torch.from_numpy(_lowerCamelCase ).to(device=_lowerCamelCase )
_UpperCAmelCase : List[Any] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
_UpperCAmelCase : str = torch.from_numpy(_lowerCamelCase )
_UpperCAmelCase : Any = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(_lowerCamelCase ).startswith("mps" ):
# mps does not support float64
_UpperCAmelCase : Optional[int] = timesteps.to(_lowerCamelCase , dtype=torch.floataa )
else:
_UpperCAmelCase : List[str] = timesteps.to(device=_lowerCamelCase )
# empty dt and derivative
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : List[str] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
_UpperCAmelCase : List[Any] = defaultdict(_lowerCamelCase )
    def _sigma_to_t(self, sigma, log_sigmas):
'''simple docstring'''
_UpperCAmelCase : List[Any] = np.log(_lowerCamelCase )
# get distribution
_UpperCAmelCase : Optional[Any] = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
_UpperCAmelCase : Optional[Any] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
_UpperCAmelCase : Tuple = low_idx + 1
_UpperCAmelCase : Optional[Any] = log_sigmas[low_idx]
_UpperCAmelCase : str = log_sigmas[high_idx]
# interpolate sigmas
_UpperCAmelCase : Optional[Any] = (low - log_sigma) / (low - high)
_UpperCAmelCase : Optional[int] = np.clip(_lowerCamelCase , 0 , 1 )
# transform interpolation to time range
_UpperCAmelCase : int = (1 - w) * low_idx + w * high_idx
_UpperCAmelCase : Optional[int] = t.reshape(sigma.shape )
return t
    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps) ->torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase : List[Any] = in_sigmas[-1].item()
_UpperCAmelCase : List[str] = in_sigmas[0].item()
_UpperCAmelCase : Optional[int] = 7.0 # 7.0 is the value used in the paper
_UpperCAmelCase : List[Any] = np.linspace(0 , 1 , _lowerCamelCase )
_UpperCAmelCase : int = sigma_min ** (1 / rho)
_UpperCAmelCase : Tuple = sigma_max ** (1 / rho)
_UpperCAmelCase : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
@property
    def state_in_first_order(self):
'''simple docstring'''
return self.dt is None
    def step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: Union[float, torch.FloatTensor], sample: Union[torch.FloatTensor, np.ndarray], return_dict: bool = True) ->Union[SchedulerOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase : Dict = self.index_for_timestep(_lowerCamelCase )
# advance index counter by 1
_UpperCAmelCase : Optional[int] = timestep.cpu().item() if torch.is_tensor(_lowerCamelCase ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
_UpperCAmelCase : List[Any] = self.sigmas[step_index]
_UpperCAmelCase : int = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
_UpperCAmelCase : List[str] = self.sigmas[step_index - 1]
_UpperCAmelCase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
_UpperCAmelCase : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase : int = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
_UpperCAmelCase : Dict = sigma_hat if self.state_in_first_order else sigma_next
_UpperCAmelCase : Optional[int] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
_UpperCAmelCase : Any = model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or `v_prediction`""" )
if self.config.clip_sample:
_UpperCAmelCase : Any = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
_UpperCAmelCase : int = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
_UpperCAmelCase : Dict = sigma_next - sigma_hat
# store for 2nd order step
_UpperCAmelCase : Tuple = derivative
_UpperCAmelCase : List[Any] = dt
_UpperCAmelCase : Optional[Any] = sample
else:
# 2. 2nd order / Heun's method
_UpperCAmelCase : Optional[Any] = (sample - pred_original_sample) / sigma_next
_UpperCAmelCase : List[Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
_UpperCAmelCase : Dict = self.dt
_UpperCAmelCase : int = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : str = None
_UpperCAmelCase : Optional[Any] = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_lowerCamelCase )
    def add_noise(self, original_samples: torch.FloatTensor, noise: torch.FloatTensor, timesteps: torch.FloatTensor) ->torch.FloatTensor:
'''simple docstring'''
_UpperCAmelCase : Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_lowerCamelCase ):
# mps does not support float64
_UpperCAmelCase : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
_UpperCAmelCase : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
_UpperCAmelCase : Dict = self.timesteps.to(original_samples.device )
_UpperCAmelCase : int = timesteps.to(original_samples.device )
_UpperCAmelCase : int = [self.index_for_timestep(_lowerCamelCase , _lowerCamelCase ) for t in timesteps]
_UpperCAmelCase : Union[str, Any] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
_UpperCAmelCase : Union[str, Any] = sigma.unsqueeze(-1 )
_UpperCAmelCase : Optional[Any] = original_samples + noise * sigma
return noisy_samples
def __len__( self : Any ) ->Dict:
'''simple docstring'''
return self.config.num_train_timesteps
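
# Hedged usage sketch for the scheduler above. It assumes the public diffusers
# HeunDiscreteScheduler API (set_timesteps / scale_model_input / step); the
# zero tensor stands in for a real UNet's noise prediction.
if __name__ == "__main__":
    from diffusers import HeunDiscreteScheduler

    heun = HeunDiscreteScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
    heun.set_timesteps(25)

    latents = torch.randn(1, 3, 64, 64) * heun.init_noise_sigma
    for t in heun.timesteps:
        model_input = heun.scale_model_input(latents, t)
        noise_pred = torch.zeros_like(model_input)  # placeholder epsilon prediction
        latents = heun.step(noise_pred, t, latents).prev_sample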
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
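
# Brute-force cross-check sketch for the Tarjan-style routine above: a vertex is
# an articulation point iff deleting it increases the number of connected
# components. `data` is the adjacency list above; expected output: [2, 3, 5].
def _connected_components(adj, removed=frozenset()):
    seen, count = set(), 0
    for start in adj:
        if start in removed or start in seen:
            continue
        count += 1
        stack = [start]
        while stack:
            v = stack.pop()
            if v in seen or v in removed:
                continue
            seen.add(v)
            stack.extend(w for w in adj[v] if w not in removed)
    return count


def articulation_points_bruteforce(adj):
    base = _connected_components(adj)
    return [v for v in adj if _connected_components(adj, {v}) > base]


print(articulation_points_bruteforce(data))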
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
def solution():
    """Find the last ten digits of 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
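
# Constant-memory variant sketch: modular exponentiation keeps only the last
# ten digits instead of materialising the full ~3000-digit sum.
MOD = 10**10
print(str(sum(pow(i, i, MOD) for i in range(1, 1_001)) % MOD).zfill(10))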
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


# here alpha is the learning rate, x is the feature matrix and y is the target vector
def logistic_reg(alpha, x, y, max_iterations=70_000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing the theta i.e our weights vector

    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
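
# Optional cross-check sketch against scikit-learn. The fitted coefficients
# will not match `theta` exactly: sklearn adds an intercept term, and
# `penalty=None` (no regularisation) requires scikit-learn >= 1.2.
if __name__ == "__main__":
    from sklearn.linear_model import LogisticRegression

    clf = LogisticRegression(penalty=None, max_iter=10_000)
    clf.fit(iris.data[:, :2], (iris.target != 0) * 1)
    print("sklearn coef:", clf.coef_.ravel(), "intercept:", clf.intercept_)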
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every query vector, return the nearest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
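
    # Worked example sketch for the helpers above: each query row is matched to
    # its nearest dataset row under the Euclidean metric (both distances are
    # sqrt(0.02) here).
    sample_dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    queries = np.array([[0.1, 0.1], [1.9, 1.9]])
    print(similarity_search(sample_dataset, queries))
    # -> [[[0.0, 0.0], 0.1414...], [[2.0, 2.0], 0.1414...]]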
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__( self , notes_encoder: SpectrogramNotesEncoder , continuous_encoder: SpectrogramContEncoder , decoder: T5FilmDecoder , scheduler: DDPMScheduler , melgan: OnnxRuntimeModel if is_onnx_available() else Any , ) ->None:
'''simple docstring'''
super().__init__()
# From MELGAN
_UpperCAmelCase : Optional[int] = math.log(1E-5 ) # Matches MelGAN training.
_UpperCAmelCase : List[Any] = 4.0 # Largest value for most examples
_UpperCAmelCase : Optional[Any] = 1_28
self.register_modules(
notes_encoder=__A , continuous_encoder=__A , decoder=__A , scheduler=__A , melgan=__A , )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Any = output_range
if clip:
_UpperCAmelCase : List[Any] = torch.clip(__A , self.min_value , self.max_value )
# Scale to [0, 1].
_UpperCAmelCase : str = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = input_range
_UpperCAmelCase : Optional[int] = torch.clip(__A , __A , __A ) if clip else outputs
# Scale to [0, 1].
_UpperCAmelCase : Optional[Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
'''simple docstring'''
_UpperCAmelCase : Tuple = input_tokens > 0
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.notes_encoder(
encoder_input_tokens=__A , encoder_inputs_mask=__A )
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.continuous_encoder(
encoder_inputs=__A , encoder_inputs_mask=__A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
'''simple docstring'''
_UpperCAmelCase : Optional[int] = noise_time
if not torch.is_tensor(__A ):
_UpperCAmelCase : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
_UpperCAmelCase : Union[str, Any] = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
_UpperCAmelCase : Any = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
_UpperCAmelCase : int = self.decoder(
encodings_and_masks=__A , decoder_input_tokens=__A , decoder_noise_time=__A )
return logits
@torch.no_grad()
def __call__( self : Tuple , lowerCamelCase__ : List[List[int]] , lowerCamelCase__ : Optional[torch.Generator] = None , lowerCamelCase__ : int = 1_00 , lowerCamelCase__ : bool = True , lowerCamelCase__ : str = "numpy" , lowerCamelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCamelCase__ : int = 1 , ) ->Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__A , __A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(__A )}.""" )
_UpperCAmelCase : Dict = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
_UpperCAmelCase : str = np.zeros([1, 0, self.n_dims] , np.floataa )
_UpperCAmelCase : int = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
for i, encoder_input_tokens in enumerate(__A ):
if i == 0:
_UpperCAmelCase : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
_UpperCAmelCase : int = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=__A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
_UpperCAmelCase : Any = ones
_UpperCAmelCase : Union[str, Any] = self.scale_features(
__A , output_range=[-1.0, 1.0] , clip=__A )
_UpperCAmelCase : Any = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=__A , continuous_mask=__A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
_UpperCAmelCase : Tuple = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=__A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(__A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCAmelCase : Optional[Any] = self.decode(
encodings_and_masks=__A , input_tokens=__A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
_UpperCAmelCase : List[str] = self.scheduler.step(__A , __A , __A , generator=__A ).prev_sample
_UpperCAmelCase : List[Any] = self.scale_to_features(__A , input_range=[-1.0, 1.0] )
_UpperCAmelCase : Union[str, Any] = mel[:1]
_UpperCAmelCase : Any = mel.cpu().float().numpy()
_UpperCAmelCase : Tuple = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__A , __A )
logger.info("Generated segment" , __A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
_UpperCAmelCase : Optional[Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
_UpperCAmelCase : Tuple = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=__A )
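
# Standalone sketch of the two feature-scaling helpers above: log-mel features
# are mapped into [-1, 1] for the diffusion model and back for the vocoder.
# MIN_VALUE mirrors the MelGAN training floor used in the pipeline.
MIN_VALUE, MAX_VALUE = math.log(1e-5), 4.0


def _scale_features(features, output_range=(-1.0, 1.0)):
    min_out, max_out = output_range
    zero_one = (features - MIN_VALUE) / (MAX_VALUE - MIN_VALUE)
    return zero_one * (max_out - min_out) + min_out


def _scale_to_features(outputs, input_range=(-1.0, 1.0)):
    min_out, max_out = input_range
    zero_one = (outputs - min_out) / (max_out - min_out)
    return zero_one * (MAX_VALUE - MIN_VALUE) + MIN_VALUE


_x = torch.linspace(MIN_VALUE, MAX_VALUE, 5)
assert torch.allclose(_scale_to_features(_scale_features(_x)), _x, atol=1e-5)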
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)
    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        black_mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=black_mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)
    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)
    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
'''simple docstring'''
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict = {}
def next_term(a_i, k, i, n):
_UpperCAmelCase : List[str] = sum(a_i[j] for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ) )
_UpperCAmelCase : Any = sum(a_i[j] * base[j] for j in range(min(len(_UpperCAmelCase ) , _UpperCAmelCase ) ) )
_UpperCAmelCase , _UpperCAmelCase : Tuple = 0, 0
_UpperCAmelCase : Any = n - i
_UpperCAmelCase : List[Any] = memo.get(_UpperCAmelCase )
if sub_memo is not None:
_UpperCAmelCase : Optional[Any] = sub_memo.get(_UpperCAmelCase )
if jumps is not None and len(_UpperCAmelCase ) > 0:
# find and make the largest jump without going over
_UpperCAmelCase : str = -1
for _k in range(len(_UpperCAmelCase ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCAmelCase : int = _k
break
if max_jump >= 0:
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCAmelCase : int = diff + c
for j in range(min(_UpperCAmelCase , len(_UpperCAmelCase ) ) ):
_UpperCAmelCase , _UpperCAmelCase : str = divmod(_UpperCAmelCase , 10 )
if new_c > 0:
add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
else:
_UpperCAmelCase : int = []
else:
_UpperCAmelCase : List[Any] = {c: []}
_UpperCAmelCase : Tuple = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCAmelCase , _UpperCAmelCase : List[Any] = next_term(_UpperCAmelCase , k - 1 , i + dn , _UpperCAmelCase )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCAmelCase , _UpperCAmelCase : Any = compute(_UpperCAmelCase , _UpperCAmelCase , i + dn , _UpperCAmelCase )
diff += _diff
dn += terms_jumped
_UpperCAmelCase : Tuple = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCAmelCase : int = 0
while j < len(_UpperCAmelCase ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(_UpperCAmelCase , (diff, dn, k) )
return (diff, dn)
def compute(a_i, k, i, n):
if i >= n:
return 0, i
if k > len(_UpperCAmelCase ):
a_i.extend([0 for _ in range(k - len(_UpperCAmelCase ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCAmelCase : Optional[int] = i
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = 0, 0, 0
for j in range(len(_UpperCAmelCase ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCAmelCase : Optional[Any] = ds_c + ds_b
diff += addend
_UpperCAmelCase : str = 0
for j in range(_UpperCAmelCase ):
_UpperCAmelCase : List[Any] = a_i[j] + addend
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = divmod(_UpperCAmelCase , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return diff, i - start_i
def add(digits, k, addend):
for j in range(_UpperCAmelCase , len(_UpperCAmelCase ) ):
_UpperCAmelCase : int = digits[j] + addend
if s >= 10:
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = divmod(_UpperCAmelCase , 10 )
_UpperCAmelCase : List[Any] = addend // 10 + quotient
else:
_UpperCAmelCase : Optional[int] = s
_UpperCAmelCase : str = addend // 10
if addend == 0:
break
while addend > 0:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = divmod(_UpperCAmelCase , 10 )
digits.append(_UpperCAmelCase )
def solution(n: int = 10**15) -> int:
_UpperCAmelCase : Tuple = [1]
_UpperCAmelCase : Optional[Any] = 1
_UpperCAmelCase : Union[str, Any] = 0
while True:
_UpperCAmelCase , _UpperCAmelCase : Tuple = next_term(_UpperCAmelCase , 20 , i + dn , _UpperCAmelCase )
dn += terms_jumped
if dn == n - i:
break
_UpperCAmelCase : str = 0
for j in range(len(_UpperCAmelCase ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F'''{solution() = }''')
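
# Naive reference sketch for small n: a(1) = 1 and a(n) = a(n-1) + digitsum(a(n-1)).
# Useful to sanity-check the jump-caching solution above before trusting it at 10**15.
def naive_a(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a


print([naive_a(k) for k in range(1, 8)])  # [1, 2, 4, 8, 16, 23, 28]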
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual)+'E'+str(n+1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
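
# Sanity check sketch for the forward-mode AD above: g(y) = y**2 * y**4 = y**6,
# whose second derivative is 30 * y**4, so at y = 9 it should equal 196830.
def _g(y):
    return y**2 * y**4


assert differentiate(_g, 9, 2) == 30 * 9**4  # 196830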
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) ->None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowerCamelCase__ = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor

    def __call__( self , images: Optional[ImageInput] = None , text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = False , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_image_mask: Optional[bool] = None , return_codebook_pixels: Optional[bool] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) ->BatchEncoding:
        """Tokenize the text and/or preprocess the images with the wrapped processors."""
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )

        if text is not None:
            encoding = self.tokenizer(
                text=text , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        if images is not None:
            image_features = self.image_processor(
                images , return_image_mask=return_image_mask , return_codebook_pixels=return_codebook_pixels , return_tensors=return_tensors , **kwargs , )

        if text is not None and images is not None:
            encoding.update(image_features )
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's decode."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
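
# Hedged usage sketch. It assumes the standard transformers FlavaProcessor API;
# the "facebook/flava-full" checkpoint name is illustrative and requires a
# network or local copy to resolve.
if __name__ == "__main__":
    from PIL import Image
    from transformers import FlavaProcessor

    flava_processor = FlavaProcessor.from_pretrained("facebook/flava-full")
    dummy_image = Image.new("RGB", (224, 224))
    batch = flava_processor(images=dummy_image, text="a photo", return_tensors="pt")
    print(sorted(batch.keys()))  # e.g. attention_mask, input_ids, pixel_values, ...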
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 50_265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=True )
        input_ids = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
'''simple docstring'''
encode_dict = {
'a': 'AAAAA',
'b': 'AAAAB',
'c': 'AAABA',
'd': 'AAABB',
'e': 'AABAA',
'f': 'AABAB',
'g': 'AABBA',
'h': 'AABBB',
'i': 'ABAAA',
'j': 'BBBAA',
'k': 'ABAAB',
'l': 'ABABA',
'm': 'ABABB',
'n': 'ABBAA',
'o': 'ABBAB',
'p': 'ABBBA',
'q': 'ABBBB',
'r': 'BAAAA',
's': 'BAAAB',
't': 'BAABA',
'u': 'BAABB',
'v': 'BBBAB',
'w': 'BABAA',
'x': 'BABAB',
'y': 'BABBA',
'z': 'BABBB',
' ': ' ',
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces" )
    return encoded


def decode(coded: str) -> str:
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces" )
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
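
    # Round-trip sketch using the helpers above.
    secret = encode("hello world")
    print(secret)  # AABBBAABAAABABAABABAABBAB BABAAABBABBAAAAABABAAAABB
    print(decode(secret))  # hello world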
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
lowerCamelCase__ = 3
def __lowerCAmelCase (__lowerCAmelCase ):
print("Generating primitive root of p" )
while True:
_UpperCAmelCase : Tuple = random.randrange(3 , lowercase_ )
if pow(lowercase_ , 2 , lowercase_ ) == 1:
continue
if pow(lowercase_ , lowercase_ , lowercase_ ) == 1:
continue
return g
def __lowerCAmelCase (__lowerCAmelCase ):
print("Generating prime p..." )
_UpperCAmelCase : Union[str, Any] = rabin_miller.generate_large_prime(lowercase_ ) # select large prime number.
_UpperCAmelCase : Optional[int] = primitive_root(lowercase_ ) # one primitive root on modulo p.
_UpperCAmelCase : List[Any] = random.randrange(3 , lowercase_ ) # private_key -> have to be greater than 2 for safety.
_UpperCAmelCase : Tuple = cryptomath.find_mod_inverse(pow(lowercase_ , lowercase_ , lowercase_ ) , lowercase_ )
_UpperCAmelCase : Union[str, Any] = (key_size, e_a, e_a, p)
_UpperCAmelCase : str = (key_size, d)
return public_key, private_key
def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f"{name}_pubkey.txt") or os.path.exists(f"{name}_privkey.txt"):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f"\nWriting public key to file {name}_pubkey.txt...")
    with open(f"{name}_pubkey.txt", "w") as fo:
        fo.write(f"{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}")

    print(f"Writing private key to file {name}_privkey.txt...")
    with open(f"{name}_privkey.txt", "w") as fo:
        fo.write(f"{private_key[0]},{private_key[1]}")
def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")
if __name__ == "__main__":
main()
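
# Illustrative sketch (added): textbook ElGamal encryption/decryption with toy
# numbers, showing how a (g, g^d mod p, p)-style key pair is typically used.
# The tiny values below are for demonstration only and do not follow the exact
# file format written by make_key_files().
if __name__ == "__main__":
    p_demo, g_demo, d_demo = 467, 2, 127  # toy prime, generator, private key
    h_demo = pow(g_demo, d_demo, p_demo)  # public component g^d mod p
    m_demo, k_demo = 123, 67  # message and ephemeral key
    c1 = pow(g_demo, k_demo, p_demo)
    c2 = (m_demo * pow(h_demo, k_demo, p_demo)) % p_demo
    # decryption: strip the shared secret c1^d by multiplying with its inverse
    recovered = (c2 * pow(c1, p_demo - 1 - d_demo, p_demo)) % p_demo
    assert recovered == m_demo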
| 719
|
'''simple docstring'''
import os
def solution() -> str:
    """Returns the first ten digits of the sum of the numbers listed in num.txt."""
    file_path = os.path.join(os.path.dirname(__file__), "num.txt")
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]
if __name__ == "__main__":
print(solution())
| 40
| 0
|
'''simple docstring'''
from math import factorial
def solution(num: int = 100) -> int:
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
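
# Illustrative check (added, not part of the original solution):
# >>> solution(10)   # 10! = 3628800 -> 3 + 6 + 2 + 8 + 8 + 0 + 0
# 27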
| 720
|
'''simple docstring'''
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt n_copies times."""

    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generate commented codes ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks if all generated functions in the batch are completed."""

    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of code containing one of the EOF_STRINGS boundaries."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple codes for each task in the dataset using multiple GPUs with accelerate."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )
            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"
    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)
    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nice with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 721
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 40
| 0
|
'''simple docstring'''
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Check whether the 0-9 pandigital number given as a digit tuple has the
    substring-divisibility property of Project Euler problem 43."""
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True


def solution(n: int = 10) -> int:
    """Returns the sum of all 0 to 9 pandigital numbers with the property."""
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
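
# Illustrative check (added sketch): 1406357289 is the classic example of such
# a pandigital number, e.g. d2d3d4 = 406 is divisible by 2 and d8d9d10 = 289
# is divisible by 17.
if __name__ == "__main__":
    assert is_substring_divisible(tuple(map(int, "1406357289")))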
| 700
|
'''simple docstring'''
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
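
# Illustrative usage sketch (added; the repo id below is hypothetical):
# >>> hf_hub_url("my-org/my-dataset", "data/train.csv", revision="main")
# returns the resolve URL for that file in the dataset repo, with the file
# path URL-encoded on old huggingface_hub versions.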
| 40
| 0
|
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0
class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)

    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_map = model.bucket_bytes_cap // (1_024 * 1_024)
if observed_bucket_cap_map != 15:
error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 701
|
'''simple docstring'''
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(F"""backbone {config.backbone} is not supported by timm.""" )
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())

        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Empty init weights function to ensure compatibility of the class in the library.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
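
# Illustrative usage sketch (added; the timm model name is just an example):
#     config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(1, 2, 3, 4))
#     backbone = TimmBackbone(config)
#     feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage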
| 40
| 0
|
def sylvester(number: int) -> int:
    assert isinstance(number, int), f"The input value of [n={number}] is not an integer"

    if number == 1:
        return 2
    elif number < 1:
        msg = f"The input value of [n={number}] has to be > 0"
        raise ValueError(msg)
    else:
        num = sylvester(number - 1)
        lower = num - 1
        upper = num
        return lower * upper + 1
if __name__ == "__main__":
print(F'''The 8th number in Sylvester\'s sequence: {sylvester(8)}''')
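
# Illustrative check (added sketch): each term satisfies
# a(n) = a(n-1)^2 - a(n-1) + 1, so the sequence starts 2, 3, 7, 43, 1807, ...
if __name__ == "__main__":
    print([sylvester(i) for i in range(1, 6)])  # [2, 3, 7, 43, 1807]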
| 702
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 40
| 0
|
'''simple docstring'''
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
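
# Illustrative check (added sketch): the classic Levenshtein example needs
# three edits to turn "kitten" into "sitting".
if __name__ == "__main__":
    print(min_distance_up_bottom("kitten", "sitting"))  # 3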
| 703
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class Mask2FormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = Mask2FormerConfig(
            hidden_size=self.hidden_dim,
        )

        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_layers)
    def create_and_check_mask2former_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = Mask2FormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)

        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.hidden_dim),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_mask2former_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = Mask2FormerForUniversalSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class Mask2FormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": Mask2FormerModel} if is_torch_available() else {}

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = Mask2FormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=Mask2FormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_mask2former_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=False)

    def test_mask2former_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*config_and_inputs)
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowerCAmelCase__ ( self : Optional[int] ) ->Any:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowerCAmelCase__ ( self : Union[str, Any] ) ->str:
'''simple docstring'''
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            model = Mask2FormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }
        config = self.model_tester.get_config()

        model = Mask2FormerForUniversalSegmentation(config).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config).to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()

        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class Mask2FormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def model_checkpoints(self):
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def default_image_processor(self):
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None

    def test_inference_no_head(self):
        model = Mask2FormerModel.from_pretrained(self.model_checkpoints).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_universal_segmentation_head(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 384, 384))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape, (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4)
        )
        expected_slice = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))

        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape, (1, model.config.num_queries, model.config.num_labels + 1))
        expected_slice = torch.tensor(
            [
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(torch_device).eval()
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
| 40
| 0
|
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")
class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )
        self.assertIsNotNone(config)
@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass
    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
| 704
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def b2mb(x):
    # Converting Bytes to Megabytes
    return int(x / 2**20)
class TorchTracemalloc:
    # Context manager that tracks the CUDA memory allocated between __enter__ and __exit__
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = b2mb(self.end - self.begin)
        self.peaked = b2mb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def get_dataloaders(
    accelerator: Accelerator,
    batch_size: int = 16,
    model_name: str = "bert-base-cased",
    n_train: int = 320,
    n_val: int = 160,
):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset(
        "glue", "mrpc", split={"train": f"train[:{n_train}]", "validation": f"validation[:{n_val}]"}
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name, args.n_train, args.n_val)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    train_total_peak_memory = {}
    for epoch in range(starting_epoch, num_epochs):
        with TorchTracemalloc() as tracemalloc:
            model.train()
            for step, batch in enumerate(train_dataloader):
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

                overall_step += 1

        # Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
        accelerator.print("Memory before entering the train : {}".format(b2mb(tracemalloc.begin)))
        accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used))
        accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked))
        accelerator.print(
            "Total Peak Memory consumed during the train (max): {}".format(
                tracemalloc.peaked + b2mb(tracemalloc.begin)
            )
        )
        train_total_peak_memory[f"epoch-{epoch}"] = tracemalloc.peaked + b2mb(tracemalloc.begin)
        if args.peak_memory_upper_bound is not None:
            assert (
                train_total_peak_memory[f"epoch-{epoch}"] <= args.peak_memory_upper_bound
            ), "Peak memory usage exceeded the upper bound"

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "peak_memory_utilization.json"), "w") as f:
            json.dump(train_total_peak_memory, f)
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--peak_memory_upper_bound",
        type=float,
        default=None,
        help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.",
    )
    parser.add_argument(
        "--n_train",
        type=int,
        default=320,
        help="Number of training examples to use.",
    )
    parser.add_argument(
        "--n_val",
        type=int,
        default=160,
        help="Number of validation examples to use.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 40
| 0
|
'''simple docstring'''
import json
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from transformers import OneFormerImageProcessor
from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle
from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput
if is_vision_available():
from PIL import Image
def prepare_metadata(class_info_file, repo_path="shi-labs/oneformer_demo"):
    with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f:
        class_info = json.load(f)
    metadata = {}
    class_names = []
    thing_ids = []
    for key, info in class_info.items():
        metadata[key] = info["name"]
        class_names.append(info["name"])
        if info["isthing"]:
            thing_ids.append(int(key))
    metadata["thing_ids"] = thing_ids
    metadata["class_names"] = class_names
    return metadata
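
# Illustrative note (added sketch): for a class-info JSON such as
#     {"0": {"name": "wall", "isthing": 0}, "1": {"name": "person", "isthing": 1}, ...}
# the returned metadata maps each id to its name and additionally records the
# "thing_ids" and "class_names" lists used by the image processor.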
class OneFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        size=None,
        do_resize=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        num_labels=10,
        do_reduce_labels=False,
        ignore_index=255,
        repo_path="shi-labs/oneformer_demo",
        class_info_file="ade20k_panoptic.json",
        num_text=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = {"shortest_edge": 32, "longest_edge": 1333} if size is None else size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.class_info_file = class_info_file
        self.metadata = prepare_metadata(class_info_file, repo_path)
        self.num_text = num_text
        self.repo_path = repo_path

        # for the post_process_functions
        self.batch_size = 2
        self.num_queries = 10
        self.num_classes = 10
        self.height = 3
        self.width = 4

        self.num_labels = num_labels
        self.do_reduce_labels = do_reduce_labels
        self.ignore_index = ignore_index
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"num_labels": self.num_labels,
"do_reduce_labels": self.do_reduce_labels,
"ignore_index": self.ignore_index,
"class_info_file": self.class_info_file,
"metadata": self.metadata,
"num_text": self.num_text,
}
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : List[str] , lowerCamelCase__ : Union[str, Any]=False ) ->List[Any]:
'''simple docstring'''
if not batched:
_UpperCAmelCase : Optional[int] = image_inputs[0]
if isinstance(snake_case__ , Image.Image ):
_UpperCAmelCase , _UpperCAmelCase : Tuple = image.size
else:
_UpperCAmelCase , _UpperCAmelCase : Dict = image.shape[1], image.shape[2]
if w < h:
_UpperCAmelCase : Union[str, Any] = int(self.size["shortest_edge"] * h / w )
_UpperCAmelCase : List[str] = self.size["shortest_edge"]
elif w > h:
_UpperCAmelCase : Tuple = self.size["shortest_edge"]
_UpperCAmelCase : str = int(self.size["shortest_edge"] * w / h )
else:
_UpperCAmelCase : Any = self.size["shortest_edge"]
_UpperCAmelCase : List[Any] = self.size["shortest_edge"]
else:
_UpperCAmelCase : List[Any] = []
for image in image_inputs:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
expected_height = max(expected_values , key=lambda item: item[0] )[0]
expected_width = max(expected_values , key=lambda item: item[1] )[1]
return expected_height, expected_width
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
return OneFormerForUniversalSegmentationOutput(
# +1 for null class
class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , )
@require_torch
@require_vision
class lowerCAmelCase__ ( __a , unittest.TestCase ):
lowerCAmelCase : List[str] = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None
# only for test_image_processing_common.test_image_proc_to_json_string
lowerCAmelCase : List[str] = image_processing_class
def lowerCAmelCase__ ( self : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Dict = OneFormerImageProcessorTester(self )
@property
def lowerCAmelCase__ ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
return self.image_processing_tester.prepare_image_processor_dict()
def lowerCAmelCase__ ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case__ , "image_mean" ) )
self.assertTrue(hasattr(snake_case__ , "image_std" ) )
self.assertTrue(hasattr(snake_case__ , "do_normalize" ) )
self.assertTrue(hasattr(snake_case__ , "do_resize" ) )
self.assertTrue(hasattr(snake_case__ , "size" ) )
self.assertTrue(hasattr(snake_case__ , "ignore_index" ) )
self.assertTrue(hasattr(snake_case__ , "class_info_file" ) )
self.assertTrue(hasattr(snake_case__ , "num_text" ) )
self.assertTrue(hasattr(snake_case__ , "repo_path" ) )
self.assertTrue(hasattr(snake_case__ , "metadata" ) )
self.assertTrue(hasattr(snake_case__ , "do_reduce_labels" ) )
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , Image.Image )
# Test not batched input
_UpperCAmelCase : str = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : int = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : List[str] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , numpify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , np.ndarray )
# Test not batched input
_UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : str = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase : Any = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ , torchify=snake_case__ )
for image in image_inputs:
self.assertIsInstance(snake_case__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase : List[str] = image_processor(image_inputs[0] , ["semantic"] , return_tensors="pt" ).pixel_values
_UpperCAmelCase , _UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(snake_case__ )
self.assertEqual(
encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , )
# Test batched
_UpperCAmelCase , _UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(snake_case__ , batched=snake_case__ )
_UpperCAmelCase : List[Any] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processing_tester.batch_size,
self.image_processing_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Any=False , lowerCamelCase__ : Dict=False , lowerCamelCase__ : Union[str, Any]="np" ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# prepare image and target
_UpperCAmelCase : Tuple = self.image_processing_tester.num_labels
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[int] = None
_UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=snake_case__ )
if with_segmentation_maps:
_UpperCAmelCase : Optional[Any] = num_labels
if is_instance_map:
_UpperCAmelCase : Optional[int] = list(range(snake_case__ ) ) * 2
_UpperCAmelCase : int = dict(enumerate(snake_case__ ) )
_UpperCAmelCase : Union[str, Any] = [
np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uint8 ) for img in image_inputs
]
if segmentation_type == "pil":
_UpperCAmelCase : str = [Image.fromarray(snake_case__ ) for annotation in annotations]
_UpperCAmelCase : Optional[Any] = image_processor(
snake_case__ , ["semantic"] * len(snake_case__ ) , snake_case__ , return_tensors="pt" , instance_id_to_semantic_id=snake_case__ , pad_and_return_pixel_mask=snake_case__ , )
return inputs
def lowerCAmelCase__ ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
def common(lowerCamelCase__ : Optional[int]=False , lowerCamelCase__ : Dict=None ):
_UpperCAmelCase : Union[str, Any] = self.comm_get_image_processor_inputs(
with_segmentation_maps=snake_case__ , is_instance_map=snake_case__ , segmentation_type=snake_case__ )
_UpperCAmelCase : List[Any] = inputs["mask_labels"]
_UpperCAmelCase : Optional[Any] = inputs["class_labels"]
_UpperCAmelCase : str = inputs["pixel_values"]
_UpperCAmelCase : Optional[Any] = inputs["text_inputs"]
# check the batch_size
for mask_label, class_label, text_input in zip(snake_case__ , snake_case__ , snake_case__ ):
self.assertEqual(mask_label.shape[0] , class_label.shape[0] )
# this ensure padding has happened
self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.num_text )
common()
common(is_instance_map=snake_case__ )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
common(is_instance_map=snake_case__ , segmentation_type="pil" )
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
fake_binary_mask = np.zeros((20, 50) )
fake_binary_mask[0, 20:] = 1
fake_binary_mask[1, :15] = 1
fake_binary_mask[5, :10] = 1
rle = binary_mask_to_rle(fake_binary_mask )
self.assertEqual(len(rle ) , 4 )
self.assertEqual(rle[0] , 21 )
self.assertEqual(rle[1] , 45 )
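# --- Hedged note (added; not part of the original test) ---
# Assuming the COCO-style convention for `binary_mask_to_rle` (alternating
# 1-based run start / run length over the row-major flattened mask), the
# expected values follow directly: 20 zeros precede the first set pixel, so the
# first run starts at 21 and spans 30 + 15 = 45 pixels; a second run of length
# 10 then starts at pixel 251 (row 5), giving the 4 entries asserted above.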
def lowerCAmelCase__ ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : Any = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs )
self.assertEqual(len(snake_case__ ) , self.image_processing_tester.batch_size )
self.assertEqual(
segmentation[0].shape , (
self.image_processing_tester.height,
self.image_processing_tester.width,
) , )
_UpperCAmelCase : str = [(1, 4) for i in range(self.image_processing_tester.batch_size )]
_UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs , target_sizes=target_sizes )
self.assertEqual(segmentation[0].shape , target_sizes[0] )
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : Tuple = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Optional[int] = image_processor.post_process_instance_segmentation(outputs , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
def lowerCAmelCase__ ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : str = self.image_processing_class(
num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="ade20k_panoptic.json" , num_text=self.image_processing_tester.num_text , repo_path="shi-labs/oneformer_demo" , )
_UpperCAmelCase : str = self.image_processing_tester.get_fake_oneformer_outputs()
_UpperCAmelCase : Any = image_processor.post_process_panoptic_segmentation(outputs , threshold=0 )
self.assertTrue(len(snake_case__ ) == self.image_processing_tester.batch_size )
for el in segmentation:
self.assertTrue("segmentation" in el )
self.assertTrue("segments_info" in el )
self.assertEqual(type(el["segments_info"] ) , snake_case__ )
self.assertEqual(
el["segmentation"].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
'''simple docstring'''
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
'169M': 12,
'430M': 24,
'1B5': 24,
'3B': 32,
'7B': 32,
'14B': 40,
}
lowerCamelCase__ = {
'169M': 768,
'430M': 1_024,
'1B5': 2_048,
'3B': 2_560,
'7B': 4_096,
'14B': 5_120,
}
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : List[str] = list(state_dict.keys() )
for name in state_dict_keys:
_UpperCAmelCase : Optional[int] = state_dict.pop(__lowerCAmelCase )
# emb -> embedding
if name.startswith("emb." ):
_UpperCAmelCase : Tuple = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
_UpperCAmelCase : Optional[int] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
_UpperCAmelCase : Union[str, Any] = re.sub(R"blocks\.(\d+)\.att" , R"blocks.\1.attention" , __lowerCAmelCase )
# ffn -> feed_forward
_UpperCAmelCase : Dict = re.sub(R"blocks\.(\d+)\.ffn" , R"blocks.\1.feed_forward" , __lowerCAmelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
_UpperCAmelCase : int = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
_UpperCAmelCase : Union[str, Any] = name.replace(".time_mix_v" , ".time_mix_value" )
# time_mix_r -> time_mix_key and reshape
if name.endswith(".time_mix_r" ):
_UpperCAmelCase : int = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
_UpperCAmelCase : List[str] = "rwkv." + name
_UpperCAmelCase : Optional[Any] = weight
return state_dict
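# --- Hedged illustration (added; not part of the original script) ---
# Effect of the renaming above on a few representative RWKV keys (the key
# names are illustrative, not read from a real checkpoint):
#   "emb.weight"              -> "rwkv.embeddings.weight"
#   "blocks.0.ln0.weight"     -> "rwkv.blocks.0.pre_ln.weight"
#   "blocks.3.att.time_mix_k" -> "rwkv.blocks.3.attention.time_mix_key"
#   "blocks.3.ffn.key.weight" -> "rwkv.blocks.3.feed_forward.key.weight"
#   "head.weight"             -> "head.weight"  (only key left unprefixed)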
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
_UpperCAmelCase : str = 50_277
_UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
_UpperCAmelCase : Tuple = PreTrainedTokenizerFast(tokenizer_file=__lowerCAmelCase )
_UpperCAmelCase : List[Any] = len(__lowerCAmelCase )
tokenizer.save_pretrained(__lowerCAmelCase )
# 2. Build the config
_UpperCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_UpperCAmelCase : Optional[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(F"""`size` should be one of {possible_sizes}, got {size}.""" )
_UpperCAmelCase : Any = RwkvConfig(
vocab_size=vocab_size , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
config.save_pretrained(__lowerCAmelCase )
# 3. Download model file then convert state_dict
_UpperCAmelCase : str = hf_hub_download(__lowerCAmelCase , __lowerCAmelCase )
_UpperCAmelCase : Optional[int] = torch.load(__lowerCAmelCase , map_location="cpu" )
_UpperCAmelCase : Any = convert_state_dict(__lowerCAmelCase )
# 4. Split in shards and save
_UpperCAmelCase , _UpperCAmelCase : List[str] = shard_checkpoint(__lowerCAmelCase )
for shard_file, shard in shards.items():
torch.save(__lowerCAmelCase , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
if index is not None:
_UpperCAmelCase : int = os.path.join(__lowerCAmelCase , __lowerCAmelCase )
# Save the index as well
with open(__lowerCAmelCase , "w" , encoding="utf-8" ) as f:
_UpperCAmelCase : int = json.dumps(__lowerCAmelCase , indent=2 , sort_keys=__lowerCAmelCase ) + "\n"
f.write(__lowerCAmelCase )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model." )
_UpperCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_UpperCAmelCase : Union[str, Any] = torch.load(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(__lowerCAmelCase , __lowerCAmelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
_UpperCAmelCase : Optional[Any] = AutoModelForCausalLM.from_pretrained(__lowerCAmelCase )
model.push_to_hub(__lowerCAmelCase , max_shard_size="2GB" )
tokenizer.push_to_hub(__lowerCAmelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
lowerCamelCase__ = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
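# --- Hedged illustration (added; not part of the original script) ---
# An example invocation; the repo id and checkpoint file name below are
# hypothetical:
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-430m \
#       --checkpoint_file RWKV-4-Pile-430M-20220808-8066.pth \
#       --output_dir ./rwkv-430m-hf \
#       --size 430M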
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase : Dict = '''yolos'''
def __init__( self : Dict , lowerCamelCase__ : List[str]=7_68 , lowerCamelCase__ : Optional[Any]=12 , lowerCamelCase__ : Optional[int]=12 , lowerCamelCase__ : Any=30_72 , lowerCamelCase__ : List[Any]="gelu" , lowerCamelCase__ : Optional[int]=0.0 , lowerCamelCase__ : List[Any]=0.0 , lowerCamelCase__ : Optional[int]=0.0_2 , lowerCamelCase__ : List[str]=1E-12 , lowerCamelCase__ : str=[5_12, 8_64] , lowerCamelCase__ : List[Any]=16 , lowerCamelCase__ : int=3 , lowerCamelCase__ : List[Any]=True , lowerCamelCase__ : List[str]=1_00 , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : Any=False , lowerCamelCase__ : Any=1 , lowerCamelCase__ : Tuple=5 , lowerCamelCase__ : Tuple=2 , lowerCamelCase__ : Any=5 , lowerCamelCase__ : Optional[int]=2 , lowerCamelCase__ : Any=0.1 , **lowerCamelCase__ : Optional[Any] , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**a_ )
_UpperCAmelCase : Optional[Any] = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : List[str] = num_attention_heads
_UpperCAmelCase : Any = intermediate_size
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : Dict = hidden_dropout_prob
_UpperCAmelCase : Tuple = attention_probs_dropout_prob
_UpperCAmelCase : Dict = initializer_range
_UpperCAmelCase : Optional[Any] = layer_norm_eps
_UpperCAmelCase : Optional[Any] = image_size
_UpperCAmelCase : Dict = patch_size
_UpperCAmelCase : List[Any] = num_channels
_UpperCAmelCase : Any = qkv_bias
_UpperCAmelCase : str = num_detection_tokens
_UpperCAmelCase : int = use_mid_position_embeddings
_UpperCAmelCase : int = auxiliary_loss
# Hungarian matcher
_UpperCAmelCase : Optional[int] = class_cost
_UpperCAmelCase : Optional[Any] = bbox_cost
_UpperCAmelCase : Union[str, Any] = giou_cost
# Loss coefficients
_UpperCAmelCase : Optional[int] = bbox_loss_coefficient
_UpperCAmelCase : Tuple = giou_loss_coefficient
_UpperCAmelCase : Optional[int] = eos_coefficient
class lowerCAmelCase__ ( __lowerCamelCase ):
lowerCAmelCase : Dict = version.parse("1.11" )
@property
def lowerCAmelCase__ ( self : str ) ->Union[str, Any]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
return 1E-4
@property
def lowerCAmelCase__ ( self : Tuple ) ->List[Any]:
'''simple docstring'''
return 12
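# --- Hedged illustration (added; not part of the original file) ---
# A sketch of how an OnnxConfig like the one above is typically driven; the
# `YolosOnnxConfig` name, model class and checkpoint are assumptions here.
# from pathlib import Path
# from transformers import AutoImageProcessor, YolosModel
# from transformers.onnx import export
#
# processor = AutoImageProcessor.from_pretrained("hustvl/yolos-small")
# model = YolosModel.from_pretrained("hustvl/yolos-small")
# onnx_config = YolosOnnxConfig(model.config)
# export(processor, model, onnx_config, onnx_config.default_onnx_opset, Path("yolos.onnx"))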
'''simple docstring'''
from __future__ import annotations
import numpy as np
def __lowerCAmelCase (__lowerCAmelCase ):
return np.maximum(0 , __lowerCAmelCase )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = [0] * no_of_processes
_UpperCAmelCase : Dict = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(_snake_case ):
_UpperCAmelCase : int = burst_time[i]
_UpperCAmelCase : Union[str, Any] = []
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Tuple = 0
# When processes are not completed,
# A process whose arrival time has passed
# and which has remaining execution time is put into the ready_process.
# The shortest process in the ready_process, target_process, is executed.
while completed != no_of_processes:
_UpperCAmelCase : Optional[Any] = []
_UpperCAmelCase : str = -1
for i in range(_snake_case ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(_snake_case )
if len(_snake_case ) > 0:
_UpperCAmelCase : str = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_UpperCAmelCase : Union[str, Any] = i
total_time += burst_time[target_process]
completed += 1
_UpperCAmelCase : int = 0
_UpperCAmelCase : Dict = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[Any] = [0] * no_of_processes
for i in range(_snake_case ):
_UpperCAmelCase : Optional[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('[TEST CASE 01]')
lowerCamelCase__ = 4
lowerCamelCase__ = [2, 5, 3, 7]
lowerCamelCase__ = [0, 0, 0, 0]
lowerCamelCase__ = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
lowerCamelCase__ = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time')
for i, process_id in enumerate(list(range(1, 5))):
print(
F'''{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'''
F'''{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'''
)
print(F'''\nAverage waiting time = {mean(waiting_time):.5f}''')
print(F'''Average turnaround time = {mean(turn_around_time):.5f}''')
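# --- Hedged worked example (added; not part of the original script) ---
# For the test case above all jobs arrive at t=0, so shortest-job-first runs
# them in burst order 2, 3, 5, 7 and the expected output (computed by hand) is:
#   waiting_time     = [0, 5, 2, 10] -> average 4.25
#   turn_around_time = [2, 10, 5, 17] -> average 8.50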
'''simple docstring'''
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __lowerCAmelCase (__lowerCAmelCase ):
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class lowerCAmelCase__ :
def __init__( self : List[Any] , lowerCamelCase__ : Iterable[torch.nn.Parameter] , lowerCamelCase__ : float = 0.9_9_9_9 , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 0 , lowerCamelCase__ : bool = False , lowerCamelCase__ : Union[float, int] = 1.0 , lowerCamelCase__ : Union[float, int] = 2 / 3 , lowerCamelCase__ : Optional[Any] = None , lowerCamelCase__ : Dict[str, Any] = None , **lowerCamelCase__ : Optional[int] , ) ->Optional[Any]:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
_UpperCAmelCase : Optional[int] = True
if kwargs.get("max_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate("max_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : str = kwargs["max_value"]
if kwargs.get("min_value" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate("min_value" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
_UpperCAmelCase : Tuple = kwargs["min_value"]
_UpperCAmelCase : Optional[Any] = list(lowerCamelCase__ )
_UpperCAmelCase : Dict = [p.clone().detach() for p in parameters]
if kwargs.get("device" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate("device" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ )
self.to(device=kwargs["device"] )
_UpperCAmelCase : Union[str, Any] = None
_UpperCAmelCase : int = decay
_UpperCAmelCase : Any = min_decay
_UpperCAmelCase : Optional[int] = update_after_step
_UpperCAmelCase : str = use_ema_warmup
_UpperCAmelCase : Union[str, Any] = inv_gamma
_UpperCAmelCase : Union[str, Any] = power
_UpperCAmelCase : List[str] = 0
_UpperCAmelCase : List[str] = None # set in `step()`
_UpperCAmelCase : Optional[int] = model_cls
_UpperCAmelCase : Union[str, Any] = model_config
@classmethod
def lowerCAmelCase__ ( cls : int , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->"EMAModel":
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = model_cls.load_config(lowerCamelCase__ , return_unused_kwargs=lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
_UpperCAmelCase : List[str] = cls(model.parameters() , model_cls=lowerCamelCase__ , model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : int ) ->Dict:
'''simple docstring'''
if self.model_cls is None:
raise ValueError("`save_pretrained` can only be used if `model_cls` was defined at __init__." )
if self.model_config is None:
raise ValueError("`save_pretrained` can only be used if `model_config` was defined at __init__." )
_UpperCAmelCase : int = self.model_cls.from_config(self.model_config )
_UpperCAmelCase : Union[str, Any] = self.state_dict()
state_dict.pop("shadow_params" , lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : int ) ->float:
'''simple docstring'''
_UpperCAmelCase : int = max(0 , optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
_UpperCAmelCase : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
_UpperCAmelCase : Any = (1 + step) / (10 + step)
_UpperCAmelCase : int = min(lowerCamelCase__ , self.decay )
# make sure decay is not smaller than min_decay
_UpperCAmelCase : Union[str, Any] = max(lowerCamelCase__ , self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowerCAmelCase__ ( self : str , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->Dict:
'''simple docstring'''
if isinstance(lowerCamelCase__ , torch.nn.Module ):
_UpperCAmelCase : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
"passing a `torch.nn.Module` to `ExponentialMovingAverage.step`" , "1.0.0" , lowerCamelCase__ , standard_warn=lowerCamelCase__ , )
_UpperCAmelCase : Any = parameters.parameters()
_UpperCAmelCase : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
_UpperCAmelCase : Tuple = self.get_decay(self.optimization_step )
_UpperCAmelCase : Any = decay
_UpperCAmelCase : Optional[Any] = 1 - decay
_UpperCAmelCase : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zero3_enabled():
    _UpperCAmelCase : str = deepspeed.zero.GatheredParameters(param , modifier_rank=None )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params , lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowerCAmelCase__ ( self : int , lowerCamelCase__ : Dict=None , lowerCamelCase__ : Optional[int]=None ) ->None:
'''simple docstring'''
_UpperCAmelCase : str = [
p.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowerCAmelCase__ ( self : List[Any] ) ->dict:
'''simple docstring'''
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
_UpperCAmelCase : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowerCAmelCase__ ( self : List[str] , lowerCamelCase__ : Iterable[torch.nn.Parameter] ) ->None:
'''simple docstring'''
if self.temp_stored_params is None:
raise RuntimeError("This ExponentialMovingAverage has no `store()`ed weights " "to `restore()`" )
for c_param, param in zip(self.temp_stored_params , lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
_UpperCAmelCase : int = None
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : dict ) ->None:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
_UpperCAmelCase : List[str] = state_dict.get("decay" , self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError("Decay must be between 0 and 1" )
_UpperCAmelCase : Union[str, Any] = state_dict.get("min_decay" , self.min_decay )
if not isinstance(self.min_decay , lowerCamelCase__ ):
raise ValueError("Invalid min_decay" )
_UpperCAmelCase : List[str] = state_dict.get("optimization_step" , self.optimization_step )
if not isinstance(self.optimization_step , lowerCamelCase__ ):
raise ValueError("Invalid optimization_step" )
_UpperCAmelCase : List[Any] = state_dict.get("update_after_step" , self.update_after_step )
if not isinstance(self.update_after_step , lowerCamelCase__ ):
raise ValueError("Invalid update_after_step" )
_UpperCAmelCase : str = state_dict.get("use_ema_warmup" , self.use_ema_warmup )
if not isinstance(self.use_ema_warmup , lowerCamelCase__ ):
raise ValueError("Invalid use_ema_warmup" )
_UpperCAmelCase : int = state_dict.get("inv_gamma" , self.inv_gamma )
if not isinstance(self.inv_gamma , (float, int) ):
raise ValueError("Invalid inv_gamma" )
_UpperCAmelCase : Any = state_dict.get("power" , self.power )
if not isinstance(self.power , (float, int) ):
raise ValueError("Invalid power" )
_UpperCAmelCase : List[str] = state_dict.get("shadow_params" , lowerCamelCase__ )
if shadow_params is not None:
_UpperCAmelCase : Optional[Any] = shadow_params
if not isinstance(self.shadow_params , lowerCamelCase__ ):
raise ValueError("shadow_params must be a list" )
if not all(isinstance(lowerCamelCase__ , torch.Tensor ) for p in self.shadow_params ):
raise ValueError("shadow_params must all be Tensors" )
'''simple docstring'''
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Optional[int] = ""
for i in table:
res += inp[i - 1]
return res
def __lowerCAmelCase (__lowerCAmelCase ):
return data[1:] + data[0]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Union[str, Any] = ""
for i in range(len(_A ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : List[str] = int("0b" + data[0] + data[-1] , 2 )
_UpperCAmelCase : Optional[int] = int("0b" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : Dict = message[:4]
_UpperCAmelCase : str = message[4:]
_UpperCAmelCase : Union[str, Any] = apply_table(_A , _A )
_UpperCAmelCase : Optional[int] = xor(_A , _A )
_UpperCAmelCase : Union[str, Any] = apply_sbox(_A , temp[:4] ) # noqa: E741
_UpperCAmelCase : List[str] = apply_sbox(_A , temp[4:] )
_UpperCAmelCase : Union[str, Any] = "0" * (2 - len(_A )) + l # noqa: E741
_UpperCAmelCase : str = "0" * (2 - len(_A )) + r
_UpperCAmelCase : List[str] = apply_table(l + r , _A )
_UpperCAmelCase : Tuple = xor(_A , _A )
return temp + right
if __name__ == "__main__":
lowerCamelCase__ = input('Enter 10 bit key: ')
lowerCamelCase__ = input('Enter 8 bit message: ')
lowerCamelCase__ = [6, 3, 7, 4, 8, 5, 10, 9]
lowerCamelCase__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
lowerCamelCase__ = [2, 4, 3, 1]
lowerCamelCase__ = [2, 6, 3, 1, 4, 8, 5, 7]
lowerCamelCase__ = [4, 1, 3, 5, 7, 2, 8, 6]
lowerCamelCase__ = [4, 1, 2, 3, 2, 3, 4, 1]
lowerCamelCase__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
lowerCamelCase__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
lowerCamelCase__ = apply_table(key, paa_table)
lowerCamelCase__ = temp[:5]
lowerCamelCase__ = temp[5:]
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = apply_table(left + right, pa_table)
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = left_shift(left)
lowerCamelCase__ = left_shift(right)
lowerCamelCase__ = apply_table(left + right, pa_table)
# encryption
lowerCamelCase__ = apply_table(message, IP)
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = temp[4:] + temp[:4]
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
lowerCamelCase__ = apply_table(CT, IP)
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = temp[4:] + temp[:4]
lowerCamelCase__ = function(expansion, sa, sa, keya, temp)
lowerCamelCase__ = apply_table(temp, IP_inv)
print('Plain text after decrypting is:', PT)
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser(
description=(
'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
' Distillation'
)
)
parser.add_argument('--model_type', default='bert', choices=['bert'])
parser.add_argument('--model_name', default='bert-base-uncased', type=str)
parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
parser.add_argument('--vocab_transform', action='store_true')
lowerCamelCase__ = parser.parse_args()
if args.model_type == "bert":
lowerCamelCase__ = BertForMaskedLM.from_pretrained(args.model_name)
lowerCamelCase__ = 'bert'
else:
raise ValueError('args.model_type should be "bert".')
lowerCamelCase__ = model.state_dict()
lowerCamelCase__ = {}
for w in ["word_embeddings", "position_embeddings"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.{w}.weight''']
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''{prefix}.embeddings.LayerNorm.{w}''']
lowerCamelCase__ = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'''
]
lowerCamelCase__ = state_dict[
F'''{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'''
]
std_idx += 1
lowerCamelCase__ = state_dict['cls.predictions.decoder.weight']
lowerCamelCase__ = state_dict['cls.predictions.bias']
if args.vocab_transform:
for w in ["weight", "bias"]:
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.dense.{w}''']
lowerCamelCase__ = state_dict[F'''cls.predictions.transform.LayerNorm.{w}''']
print(F'''N layers selected for distillation: {std_idx}''')
print(F'''Number of params transferred for distillation: {len(compressed_sd.keys())}''')
print(F'''Save transferred checkpoint to {args.dump_checkpoint}.''')
torch.save(compressed_sd, args.dump_checkpoint)
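# --- Hedged illustration (added; not part of the original script) ---
# The saved checkpoint is a plain state_dict for a 6-layer student, so a later
# load looks roughly like the commented sketch below (config details assumed):
# from transformers import BertConfig, BertForMaskedLM
# student_config = BertConfig.from_pretrained("bert-base-uncased", num_hidden_layers=6)
# student = BertForMaskedLM(student_config)
# student.load_state_dict(torch.load(args.dump_checkpoint), strict=False)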
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
print('Python version:', sys.version)
print('OS platform:', platform.platform())
print('OS architecture:', platform.machine())
try:
import torch
print('Torch version:', torch.__version__)
print('Cuda available:', torch.cuda.is_available())
print('Cuda version:', torch.version.cuda)
print('CuDNN version:', torch.backends.cudnn.version())
print('Number of GPUs available:', torch.cuda.device_count())
except ImportError:
print('Torch version:', None)
try:
import transformers
print('transformers version:', transformers.__version__)
except ImportError:
print('transformers version:', None)
'''simple docstring'''
from __future__ import annotations
lowerCamelCase__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class lowerCAmelCase__ :
def __init__( self : int , lowerCamelCase__ : dict[str, list[str]] , lowerCamelCase__ : str ) ->None:
'''simple docstring'''
_UpperCAmelCase : Dict = graph
# mapping node to its parent in resulting breadth first tree
_UpperCAmelCase : dict[str, str | None] = {}
_UpperCAmelCase : List[Any] = source_vertex
def lowerCAmelCase__ ( self : Optional[int] ) ->None:
'''simple docstring'''
_UpperCAmelCase : List[Any] = {self.source_vertex}
_UpperCAmelCase : List[Any] = None
_UpperCAmelCase : List[str] = [self.source_vertex] # first in first out queue
while queue:
_UpperCAmelCase : int = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(lowerCamelCase__ )
_UpperCAmelCase : List[Any] = vertex
queue.append(lowerCamelCase__ )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str ) ->str:
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
_UpperCAmelCase : int = self.parent.get(lowerCamelCase__ )
if target_vertex_parent is None:
_UpperCAmelCase : Tuple = (
F"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
)
raise ValueError(lowerCamelCase__ )
return self.shortest_path(lowerCamelCase__ ) + F"""->{target_vertex}"""
if __name__ == "__main__":
lowerCamelCase__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
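# --- Hedged worked example (added; not part of the original script) ---
# Traced by hand for the graph above with source 'G':
#   g.shortest_path('D')   -> 'G->C->A->B->D'
#   g.shortest_path('G')   -> 'G'
#   g.shortest_path('Foo') -> raises ValueError (no path was recorded)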
'''simple docstring'''
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
lowerCamelCase__ = 6_378_137.0
lowerCamelCase__ = 6_356_752.314_245
lowerCamelCase__ = 6_378_137
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
_UpperCAmelCase : int = (AXIS_A - AXIS_B) / AXIS_A
# Parametric latitudes
# https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
_UpperCAmelCase : List[Any] = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
_UpperCAmelCase : Tuple = atan((1 - flattening) * tan(radians(__lowerCAmelCase ) ) )
# Compute central angle between two points
# using haversine theta. sigma = haversine_distance / equatorial radius
_UpperCAmelCase : Optional[Any] = haversine_distance(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) / EQUATORIAL_RADIUS
# Intermediate P and Q values
_UpperCAmelCase : Optional[Any] = (b_lata + b_lata) / 2
_UpperCAmelCase : Any = (b_lata - b_lata) / 2
# Intermediate X value
# X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
_UpperCAmelCase : Dict = (sin(__lowerCAmelCase ) ** 2) * (cos(__lowerCAmelCase ) ** 2)
_UpperCAmelCase : str = cos(sigma / 2 ) ** 2
_UpperCAmelCase : Union[str, Any] = (sigma - sin(__lowerCAmelCase )) * (x_numerator / x_demonimator)
# Intermediate Y value
# Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
_UpperCAmelCase : List[Any] = (cos(__lowerCAmelCase ) ** 2) * (sin(__lowerCAmelCase ) ** 2)
_UpperCAmelCase : str = sin(sigma / 2 ) ** 2
_UpperCAmelCase : Dict = (sigma + sin(__lowerCAmelCase )) * (y_numerator / y_denominator)
return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
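# --- Hedged illustration (added; not part of the original module) ---
# Assuming the function above is TheAlgorithms' `lamberts_ellipsoidal_distance`
# (the name is not visible here), a typical call passes two latitude/longitude
# pairs in degrees and returns the distance in metres:
#   SAN_FRANCISCO = (37.774856, -122.424227)
#   YOSEMITE = (37.864742, -119.537521)
#   lamberts_ellipsoidal_distance(*SAN_FRANCISCO, *YOSEMITE)  # distance in metres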
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( UpperCAmelCase__ ):
lowerCAmelCase : Any = ["image_processor", "tokenizer"]
lowerCAmelCase : List[Any] = "BlipImageProcessor"
lowerCAmelCase : Union[str, Any] = "AutoTokenizer"
def __init__( self : Any , lowerCamelCase__ : Tuple , lowerCamelCase__ : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = False
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.image_processor
def __call__( self : Dict , lowerCamelCase__ : ImageInput = None , lowerCamelCase__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase__ : bool = True , lowerCamelCase__ : Union[bool, str, PaddingStrategy] = False , lowerCamelCase__ : Union[bool, str, TruncationStrategy] = None , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : int = 0 , lowerCamelCase__ : Optional[int] = None , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = False , lowerCamelCase__ : bool = True , lowerCamelCase__ : Optional[Union[str, TensorType]] = None , **lowerCamelCase__ : Tuple , ) ->BatchEncoding:
'''simple docstring'''
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
_UpperCAmelCase : Optional[int] = self.tokenizer
_UpperCAmelCase : List[Any] = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
return text_encoding
# add pixel_values
_UpperCAmelCase : Optional[int] = self.image_processor(lowerCamelCase__ , return_tensors=lowerCamelCase__ )
if text is not None:
_UpperCAmelCase : Dict = self.tokenizer(
text=lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , max_length=lowerCamelCase__ , stride=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , return_overflowing_tokens=lowerCamelCase__ , return_special_tokens_mask=lowerCamelCase__ , return_offsets_mapping=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_length=lowerCamelCase__ , verbose=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ , )
else:
_UpperCAmelCase : int = None
if text_encoding is not None:
encoding_image_processor.update(lowerCamelCase__ )
return encoding_image_processor
def lowerCAmelCase__ ( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Dict ) ->Optional[Any]:
'''simple docstring'''
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : int , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ) ->Optional[int]:
'''simple docstring'''
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def lowerCAmelCase__ ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.tokenizer.model_input_names
_UpperCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
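# --- Hedged illustration (added; not part of the original module) ---
# The attribute pair above ("BlipImageProcessor" + "AutoTokenizer") matches a
# BLIP-2-style processor; a typical call, with assumed names, looks like:
# from PIL import Image
# processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
# inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
# processor.batch_decode(generated_ids, skip_special_tokens=True)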
'''simple docstring'''
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __lowerCAmelCase (__lowerCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = []
if isinstance(__snake_case , __snake_case ):
for v in tree.values():
shapes.extend(_fetch_dims(__snake_case ) )
elif isinstance(__snake_case , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(__snake_case ) )
elif isinstance(__snake_case , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Any = []
for d in reversed(__snake_case ):
idx.append(flat_idx % d )
_UpperCAmelCase : Optional[Any] = flat_idx // d
return tuple(reversed(__snake_case ) )
@torch.jit.ignore
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , ):
'''simple docstring'''
def reduce_edge_list(__lowerCAmelCase ) -> None:
_UpperCAmelCase : List[str] = True
for i in range(len(__snake_case ) ):
_UpperCAmelCase : Union[str, Any] = -1 * (i + 1)
l[reversed_idx] &= tally
_UpperCAmelCase : str = l[reversed_idx]
if start_edges is None:
_UpperCAmelCase : Optional[Any] = [s == 0 for s in start]
reduce_edge_list(__snake_case )
if end_edges is None:
_UpperCAmelCase : Optional[int] = [e == (d - 1) for e, d in zip(__snake_case , __snake_case )]
reduce_edge_list(__snake_case )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(__snake_case ) == 0:
return [()]
elif len(__snake_case ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_UpperCAmelCase : List[str] = []
_UpperCAmelCase : Dict = []
# Dimensions common to start and end can be selected directly
for s, e in zip(__snake_case , __snake_case ):
if s == e:
path_list.append(slice(__snake_case , s + 1 ) )
else:
break
_UpperCAmelCase : List[str] = tuple(__snake_case )
_UpperCAmelCase : List[str] = len(__snake_case )
# start == end, and we're done
if divergence_idx == len(__snake_case ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase : List[str] = start[divergence_idx]
return tuple(
path + (slice(__snake_case , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_UpperCAmelCase : Dict = end[divergence_idx]
return tuple(
path + (slice(__snake_case , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_UpperCAmelCase : List[Any] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : List[str] = t.shape[:no_batch_dims]
_UpperCAmelCase : Dict = list(_flat_idx_to_idx(__snake_case , __snake_case ) )
# _get_minimal_slice_set is inclusive
_UpperCAmelCase : Dict = list(_flat_idx_to_idx(flat_end - 1 , __snake_case ) )
# Get an ordered list of slices to perform
_UpperCAmelCase : int = _get_minimal_slice_set(
__snake_case , __snake_case , __snake_case , )
_UpperCAmelCase : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = False , __lowerCAmelCase = None , __lowerCAmelCase = False , ):
'''simple docstring'''
if not (len(__snake_case ) > 0):
raise ValueError("Must provide at least one input" )
_UpperCAmelCase : List[str] = [shape[:no_batch_dims] for shape in _fetch_dims(__snake_case )]
_UpperCAmelCase : List[Any] = tuple([max(__snake_case ) for s in zip(*__snake_case )] )
def _prep_inputs(__lowerCAmelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_UpperCAmelCase : Union[str, Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_UpperCAmelCase : List[str] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
_UpperCAmelCase : str = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_UpperCAmelCase : List[Any] = tensor_tree_map(_prep_inputs , __snake_case )
_UpperCAmelCase : Optional[Any] = None
if _out is not None:
_UpperCAmelCase : Union[str, Any] = tensor_tree_map(lambda t: t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
_UpperCAmelCase : Optional[int] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_UpperCAmelCase : Optional[int] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(t: torch.Tensor ) -> torch.Tensor:
    return t[i : i + chunk_size] if t.shape[0] != 1 else t
_UpperCAmelCase : Union[str, Any] = 0
_UpperCAmelCase : Union[str, Any] = prepped_outputs
for _ in range(__snake_case ):
# Chunk the input
if not low_mem:
_UpperCAmelCase : List[Any] = _select_chunk
else:
_UpperCAmelCase : Any = partial(
_chunk_slice , flat_start=__snake_case , flat_end=min(__snake_case , i + chunk_size ) , no_batch_dims=len(__snake_case ) , )
_UpperCAmelCase : str = tensor_tree_map(__snake_case , __snake_case )
# Run the layer on the chunk
_UpperCAmelCase : Optional[Any] = layer(**__snake_case )
# Allocate space for the output
if out is None:
_UpperCAmelCase : List[Any] = tensor_tree_map(lambda t: t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , __snake_case )
# Put the chunk in its pre-allocated space
if isinstance(__snake_case , __snake_case ):
def assign(d1 , d2 ) -> None:
    for k, v in d1.items():
        if isinstance(v , dict ):
            assign(v , d2[k] )
        else:
            if _add_into_out:
                v[i : i + chunk_size] += d2[k]
            else:
                v[i : i + chunk_size] = d2[k]
assign(out , output_chunk )
elif isinstance(__snake_case , __snake_case ):
for x1, x2 in zip(out , output_chunk ):
    if _add_into_out:
        x1[i : i + chunk_size] += x2
    else:
        x1[i : i + chunk_size] = x2
elif isinstance(__snake_case , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_UpperCAmelCase : int = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
_UpperCAmelCase : Tuple = tensor_tree_map(lambda __lowerCAmelCase : t.view(orig_batch_dims + t.shape[1:] ) , __snake_case )
return out
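# Illustrative usage sketch (an addition, not part of the original module):
# chunk_layer flattens the batch dims, runs the layer on chunk_size-sized
# slices, and reassembles the output. The toy layer and its keyword name `x`
# are assumptions for demonstration only.
def _demo_chunk_layer() -> None:
    def double(*, x: torch.Tensor) -> torch.Tensor:
        return x * 2

    x = torch.randn(4, 8, 16)  # two batch dims (4, 8), one feature dim
    out = chunk_layer(double, {"x": x}, chunk_size=8, no_batch_dims=2)
    assert out.shape == (4, 8, 16)
    assert torch.allclose(out, x * 2)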
class ChunkSizeTuner:
    def __init__(self, max_chunk_size: int = 512):
        self.max_chunk_size = max_chunk_size
        self.cached_chunk_size = None
        self.cached_arg_data = None

    def _determine_favorable_chunk_size(self, fn, args, min_chunk_size) -> int:
        logging.info("Tuning chunk size...")

        if min_chunk_size >= self.max_chunk_size:
            return min_chunk_size

        candidates = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
        candidates = [c for c in candidates if c > min_chunk_size]
        candidates = [min_chunk_size] + candidates
        candidates[-1] += 4

        def test_chunk_size(chunk_size) -> bool:
            try:
                with torch.no_grad():
                    fn(*args, chunk_size=chunk_size)
                return True
            except RuntimeError:
                return False

        min_viable_chunk_size_index = 0
        i = len(candidates) - 1
        while i > min_viable_chunk_size_index:
            viable = test_chunk_size(candidates[i])
            if not viable:
                i = (min_viable_chunk_size_index + i) // 2
            else:
                min_viable_chunk_size_index = i
                i = (i + len(candidates) - 1) // 2

        return candidates[min_viable_chunk_size_index]

    def _compare_arg_caches(self, ac1, ac2) -> bool:
        consistent = True
        for a1, a2 in zip(ac1, ac2):
            assert type(a1) == type(a2)
            if isinstance(a1, (list, tuple)):
                consistent &= self._compare_arg_caches(a1, a2)
            elif isinstance(a1, dict):
                a1_items = [v for _, v in sorted(a1.items(), key=lambda x: x[0])]
                a2_items = [v for _, v in sorted(a2.items(), key=lambda x: x[0])]
                consistent &= self._compare_arg_caches(a1_items, a2_items)
            else:
                consistent &= a1 == a2
        return consistent

    def tune_chunk_size(self, representative_fn, args, min_chunk_size) -> int:
        consistent = True
        arg_data = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, args, object)
        if self.cached_arg_data is not None:
            # If args have changed shape/value, we need to re-tune
            assert len(self.cached_arg_data) == len(arg_data)
            consistent = self._compare_arg_caches(self.cached_arg_data, arg_data)
        else:
            # Otherwise, we can reuse the precomputed value
            consistent = False

        if not consistent:
            self.cached_chunk_size = self._determine_favorable_chunk_size(
                representative_fn, args, min_chunk_size, )
            self.cached_arg_data = arg_data

        assert self.cached_chunk_size is not None
        return self.cached_chunk_size
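# Illustrative usage sketch (an addition, not part of the original module):
# the tuner binary-searches the largest power-of-two chunk size that runs
# without a RuntimeError (e.g. CUDA OOM) and caches the winner until the
# argument shapes change. The representative function below is an assumption.
def _demo_chunk_size_tuner() -> int:
    tuner = ChunkSizeTuner(max_chunk_size=512)

    def representative_fn(x, chunk_size=None):
        # Stand-in for a real chunked forward pass
        return x * 2

    x = torch.randn(64, 16)
    return tuner.tune_chunk_size(representative_fn, (x,), min_chunk_size=1)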
| 711
|
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    # Run a DFS from every unvisited vertex so disconnected graphs are covered;
    # the root of a DFS tree is an articulation point iff it has more than one
    # outgoing tree edge
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)


# Adjacency list of graph
data = {
    0: [1, 2],
    1: [0, 2],
    2: [0, 1, 3, 5],
    3: [2, 4],
    4: [3],
    5: [2, 6, 8],
    6: [5, 7],
    7: [6, 8],
    8: [5, 7],
}
compute_ap(data)
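# Sanity check (an addition, not part of the original script): brute-force
# verification that 2, 3 and 5 are exactly the vertices whose removal
# disconnects this graph, matching what compute_ap prints.
def _is_connected_without(graph, removed) -> bool:
    nodes = [v for v in graph if v != removed]
    seen = {nodes[0]}
    stack = [nodes[0]]
    while stack:
        for nxt in graph[stack.pop()]:
            if nxt != removed and nxt not in seen:
                seen.add(nxt)
                stack.append(nxt)
    return len(seen) == len(nodes)


assert [v for v in data if not _is_connected_without(data, v)] == [2, 3, 5]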
| 40
| 0
|
'''simple docstring'''
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Expected number of distinct colours in a draw of ``num_picked`` balls, to nine decimals."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
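# Derivation note (an addition): by linearity of expectation, each of the 7
# colours is missing from a 20-ball draw with probability C(60, 20) / C(70, 20),
# which gives the closed form above (~6.818741802). A Monte Carlo draw should
# land close to it:
def _monte_carlo_estimate(trials: int = 100_000) -> float:
    import random

    urn = [colour for colour in range(NUM_COLOURS) for _ in range(BALLS_PER_COLOUR)]
    return sum(len(set(random.sample(urn, 20))) for _ in range(trials)) / trials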
| 712
|
'''simple docstring'''
def solution() -> str:
    """Return the last ten digits of the series 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1_001):
        total += i**i
    return str(total)[-10:]


if __name__ == "__main__":
    print(solution())
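# Equivalent constant-memory variant (an addition): only the last ten digits
# matter, so the sum can be accumulated modulo 10**10 with three-argument pow.
def solution_mod() -> str:
    modulus = 10**10
    total = sum(pow(i, i, modulus) for i in range(1, 1_001)) % modulus
    return str(total).zfill(10)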
| 40
| 0
|
'''simple docstring'''
def upper(word: str) -> str:
    """
    Convert an ASCII string to uppercase.

    >>> upper("wow")
    'WOW'
    >>> upper("Hello_World")
    'HELLO_WORLD'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
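# Note (an addition): the chr/ord arithmetic above only covers ASCII a-z; for
# general Unicode text, the built-in str.upper() is the safer choice, e.g.
# upper("hello") == "hello".upper() == "HELLO".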
| 713
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, return the closest dataset vector and its distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []

    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()

        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)

            # Keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()

        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
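# Usage sketch (an addition, not part of the original module):
def _demo_similarity_search() -> None:
    dataset = np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]])
    value_array = np.array([[0.0, 0.0, 1.0]])
    # The closest vector to (0, 0, 1) is (0, 0, 0), at Euclidean distance 1.0
    assert similarity_search(dataset, value_array) == [[[0.0, 0.0, 0.0], 1.0]]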
| 40
| 0
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
'cola': 2,
'mnli': 3,
'mrpc': 2,
'sst-2': 2,
'sts-b': 1,
'qqp': 2,
'qnli': 2,
'rte': 2,
'wnli': 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path, xlnet_config_file, pytorch_dump_folder_path, finetuning_task=None
):
    # Initialise the PyTorch model from its configuration
    config = XLNetConfig.from_json_file(xlnet_config_file)

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(f"Building PyTorch XLNetForSequenceClassification model from configuration: {config}")
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config)
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config)
    else:
        model = XLNetLMHeadModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model, config, tf_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
    print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--xlnet_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained XLNet model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=True,
help='Path to the folder to store the PyTorch model or dataset/vocab.',
)
parser.add_argument(
'--finetuning_task',
default=None,
type=str,
help='Name of a task on which the XLNet TensorFlow model was fine-tuned',
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
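# Example invocation (an addition; the checkpoint paths are placeholders):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet_pytorch \
#       --finetuning_task sst-2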
| 714
|
'''simple docstring'''
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
lowerCamelCase__ = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.diffusers_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.diffusers_dir, "schedulers/"))
        check_copies.DIFFUSERS_PATH = self.diffusers_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/diffusers/schedulers/scheduling_ddpm.py"),
            os.path.join(self.diffusers_dir, "schedulers/scheduling_ddpm.py"),
        )

    def tearDown(self):
        check_copies.DIFFUSERS_PATH = "src/diffusers"
        shutil.rmtree(self.diffusers_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.diffusers_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_diffusers(self):
        code = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput",
            "DDPMSchedulerOutput",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            re.sub("DDPM", "Test", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}",
            f"{long_class_name}SchedulerOutput",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test",
            "TestSchedulerOutput",
            REFERENCE_CODE,
            overwrite_result=re.sub("DDPM", "Test", REFERENCE_CODE),
        )
| 40
| 0
|