code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
|---|---|---|---|---|
'''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class _lowercase ( __a , unittest.TestCase ):
# FIXME: add fast tests
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
@property
def UpperCamelCase ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCamelCase ( self ) -> Optional[int]:
snake_case = ort.SessionOptions()
snake_case = False
return options
def UpperCamelCase ( self ) -> List[str]:
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__ )
snake_case = '''A red cat sitting on a park bench'''
snake_case = np.random.RandomState(0 )
snake_case = pipe(
prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=10 , generator=A__ , output_type='''np''' , )
snake_case = output.images
snake_case = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def UpperCamelCase ( self ) -> int:
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo.png''' )
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/in_paint/overture-creations-5sI6fQgYIuo_mask.png''' )
snake_case = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , subfolder='''scheduler''' , revision='''onnx''' )
snake_case = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'''runwayml/stable-diffusion-inpainting''' , revision='''onnx''' , scheduler=A__ , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__ )
snake_case = '''A red cat sitting on a park bench'''
snake_case = np.random.RandomState(0 )
snake_case = pipe(
prompt=A__ , image=A__ , mask_image=A__ , guidance_scale=7.5 , num_inference_steps=20 , generator=A__ , output_type='''np''' , )
snake_case = output.images
snake_case = images[0, 2_55:2_58, 2_55:2_58, -1]
assert images.shape == (1, 5_12, 5_12, 3)
snake_case = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 44
|
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
while b:
snake_case , snake_case = b, a % b
return a
def __UpperCamelCase ( a : int , a : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(a , a % b )
def __UpperCamelCase ( ) ->Optional[Any]:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 44
| 1
|
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowercase ( __a ):
def __init__( self , A__ , A__ , A__ , A__ , A__ , A__ , A__ , ) -> List[Any]:
super().__init__()
self.register_modules(
vae=A__ , text_encoder=A__ , tokenizer=A__ , unet=A__ , scheduler=A__ , safety_checker=A__ , feature_extractor=A__ , )
def UpperCamelCase ( self , A__ = "auto" ) -> Optional[Any]:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__ )
def UpperCamelCase ( self ) -> Union[str, Any]:
self.enable_attention_slicing(A__ )
@torch.no_grad()
def __call__( self , A__ , A__ = 5_12 , A__ = 5_12 , A__ = 50 , A__ = 7.5 , A__ = None , A__ = 1 , A__ = 0.0 , A__ = None , A__ = None , A__ = "pil" , A__ = True , A__ = None , A__ = 1 , A__ = None , **A__ , ) -> Optional[Any]:
if isinstance(A__ , A__ ):
snake_case = 1
elif isinstance(A__ , A__ ):
snake_case = len(A__ )
else:
raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(A__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ , A__ ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A__ )}.""" )
# get prompt text embeddings
snake_case = self.tokenizer(
A__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
snake_case = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
snake_case , snake_case , snake_case = text_embeddings.shape
snake_case = text_embeddings.repeat(1 , A__ , 1 )
snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , A__ , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
snake_case = 42
if negative_prompt is None:
snake_case = ['''''']
elif type(A__ ) is not type(A__ ):
raise TypeError(
F"""`negative_prompt` should be the same type to `prompt`, but got {type(A__ )} !="""
F""" {type(A__ )}.""" )
elif isinstance(A__ , A__ ):
snake_case = [negative_prompt]
elif batch_size != len(A__ ):
raise ValueError(
F"""`negative_prompt`: {negative_prompt} has batch size {len(A__ )}, but `prompt`:"""
F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
snake_case = negative_prompt
snake_case = text_input_ids.shape[-1]
snake_case = self.tokenizer(
A__ , padding='''max_length''' , max_length=A__ , truncation=A__ , return_tensors='''pt''' , )
snake_case = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
snake_case = uncond_embeddings.shape[1]
snake_case = uncond_embeddings.repeat(A__ , A__ , 1 )
snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , A__ , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
snake_case = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
snake_case = torch.randn(
A__ , generator=A__ , device='''cpu''' , dtype=A__ ).to(self.device )
snake_case = torch.randn(A__ , generator=A__ , device='''cpu''' , dtype=A__ ).to(
self.device )
else:
snake_case = torch.randn(
A__ , generator=A__ , device=self.device , dtype=A__ )
snake_case = torch.randn(A__ , generator=A__ , device=self.device , dtype=A__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
snake_case = latents_reference.to(self.device )
snake_case = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
snake_case = (latents_shape[3] - latents_shape_reference[3]) // 2
snake_case = (latents_shape[2] - latents_shape_reference[2]) // 2
snake_case = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
snake_case = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
snake_case = 0 if dx < 0 else dx
snake_case = 0 if dy < 0 else dy
snake_case = max(-dx , 0 )
snake_case = max(-dy , 0 )
# import pdb
# pdb.set_trace()
snake_case = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A__ )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
snake_case = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
snake_case = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
snake_case = {}
if accepts_eta:
snake_case = eta
for i, t in enumerate(self.progress_bar(A__ ) ):
# expand the latents if we are doing classifier free guidance
snake_case = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
snake_case = self.scheduler.scale_model_input(A__ , A__ )
# predict the noise residual
snake_case = self.unet(A__ , A__ , encoder_hidden_states=A__ ).sample
# perform guidance
if do_classifier_free_guidance:
snake_case , snake_case = noise_pred.chunk(2 )
snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
snake_case = self.scheduler.step(A__ , A__ , A__ , **A__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ , A__ , A__ )
snake_case = 1 / 0.1_8_2_1_5 * latents
snake_case = self.vae.decode(A__ ).sample
snake_case = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
snake_case = self.feature_extractor(self.numpy_to_pil(A__ ) , return_tensors='''pt''' ).to(
self.device )
snake_case , snake_case = self.safety_checker(
images=A__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
snake_case = None
if output_type == "pil":
snake_case = self.numpy_to_pil(A__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A__ , nsfw_content_detected=A__ )
| 44
|
'''simple docstring'''
import argparse
import copy
def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple:
snake_case = {}
with open(a ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[1], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[0], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __UpperCamelCase ( a : Dict , a : Tuple ) ->int:
with open(a ) as f:
snake_case = f.read(1 )
snake_case = start_node
snake_case = []
snake_case = start_node
snake_case = 0
while visiting not in first_solution:
snake_case = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(a ) and k[0] not in first_solution:
snake_case = k[1]
snake_case = k[0]
first_solution.append(a )
snake_case = distance_of_first_solution + int(a )
snake_case = best_node
first_solution.append(a )
snake_case = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __UpperCamelCase ( a : Optional[int] , a : str ) ->str:
snake_case = []
for n in solution[1:-1]:
snake_case = solution.index(a )
for kn in solution[1:-1]:
snake_case = solution.index(a )
if n == kn:
continue
snake_case = copy.deepcopy(a )
snake_case = kn
snake_case = n
snake_case = 0
for k in _tmp[:-1]:
snake_case = _tmp[_tmp.index(a ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case = distance + int(i[1] )
_tmp.append(a )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]:
snake_case = 1
snake_case = first_solution
snake_case = []
snake_case = distance_of_first_solution
snake_case = solution
while count <= iters:
snake_case = find_neighborhood(a , a )
snake_case = 0
snake_case = neighborhood[index_of_best_solution]
snake_case = len(a ) - 1
snake_case = False
while not found:
snake_case = 0
while i < len(a ):
if best_solution[i] != solution[i]:
snake_case = best_solution[i]
snake_case = solution[i]
break
snake_case = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
snake_case = True
snake_case = best_solution[:-1]
snake_case = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
snake_case = cost
snake_case = solution
else:
snake_case = index_of_best_solution + 1
snake_case = neighborhood[index_of_best_solution]
if len(a ) >= size:
tabu_list.pop(0 )
snake_case = count + 1
return best_solution_ever, best_cost
def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]:
snake_case = generate_neighbours(args.File )
snake_case , snake_case = generate_first_solution(
args.File , a )
snake_case , snake_case = tabu_search(
a , a , a , args.Iterations , args.Size , )
print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 44
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DonutImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=False , A__=True , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , ) -> Dict:
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size if size is not None else {'''height''': 18, '''width''': 20}
snake_case = do_thumbnail
snake_case = do_align_axis
snake_case = do_pad
snake_case = do_normalize
snake_case = image_mean
snake_case = image_std
def UpperCamelCase ( self ) -> List[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_thumbnail": self.do_thumbnail,
"do_align_long_axis": self.do_align_axis,
"do_pad": self.do_pad,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = DonutImageProcessor if is_vision_available() else None
def UpperCamelCase ( self ) -> Tuple:
snake_case = DonutImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> List[Any]:
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A__ , '''do_resize''' ) )
self.assertTrue(hasattr(A__ , '''size''' ) )
self.assertTrue(hasattr(A__ , '''do_thumbnail''' ) )
self.assertTrue(hasattr(A__ , '''do_align_long_axis''' ) )
self.assertTrue(hasattr(A__ , '''do_pad''' ) )
self.assertTrue(hasattr(A__ , '''do_normalize''' ) )
self.assertTrue(hasattr(A__ , '''image_mean''' ) )
self.assertTrue(hasattr(A__ , '''image_std''' ) )
def UpperCamelCase ( self ) -> int:
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
# Previous config had dimensions in (width, height) order
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) )
self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} )
def UpperCamelCase ( self ) -> List[Any]:
pass
@is_flaky()
def UpperCamelCase ( self ) -> List[str]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase ( self ) -> str:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
@is_flaky()
def UpperCamelCase ( self ) -> Optional[Any]:
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
for image in image_inputs:
self.assertIsInstance(A__ , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
| 44
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = BlenderbotSmallTokenizer
_UpperCAmelCase = False
def UpperCamelCase ( self ) -> int:
super().setUp()
snake_case = ['''__start__''', '''adapt''', '''act''', '''ap@@''', '''te''', '''__end__''', '''__unk__''']
snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
snake_case = ['''#version: 0.2''', '''a p''', '''t e</w>''', '''ap t</w>''', '''a d''', '''ad apt</w>''', '''a c''', '''ac t</w>''', '''''']
snake_case = {'''unk_token''': '''__unk__''', '''bos_token''': '''__start__''', '''eos_token''': '''__end__'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A__ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A__ ) )
def UpperCamelCase ( self , **A__ ) -> Tuple:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , A__ ) -> Dict:
snake_case = '''adapt act apte'''
snake_case = '''adapt act apte'''
return input_text, output_text
def UpperCamelCase ( self ) -> Dict:
snake_case = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case = '''adapt act apte'''
snake_case = ['''adapt''', '''act''', '''ap@@''', '''te''']
snake_case = tokenizer.tokenize(A__ )
self.assertListEqual(A__ , A__ )
snake_case = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
snake_case = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
assert tok('''sam''' ).input_ids == [13_84]
snake_case = '''I am a small frog.'''
snake_case = tok([src_text] , padding=A__ , truncation=A__ )['''input_ids''']
snake_case = tok.batch_decode(A__ , skip_special_tokens=A__ , clean_up_tokenization_spaces=A__ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def UpperCamelCase ( self ) -> str:
snake_case = BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
snake_case = '''I am a small frog .'''
snake_case = '''.'''
snake_case = tok(A__ )['''input_ids''']
snake_case = tok(A__ )['''input_ids''']
assert encoded[-1] == encoded_dot[0]
| 44
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( __a ):
_UpperCAmelCase = '''WhisperFeatureExtractor'''
_UpperCAmelCase = '''WhisperTokenizer'''
def __init__( self , A__ , A__ ) -> Optional[Any]:
super().__init__(A__ , A__ )
snake_case = self.feature_extractor
snake_case = False
def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]:
return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ )
def __call__( self , *A__ , **A__ ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ , **A__ )
snake_case = kwargs.pop('''audio''' , A__ )
snake_case = kwargs.pop('''sampling_rate''' , A__ )
snake_case = kwargs.pop('''text''' , A__ )
if len(A__ ) > 0:
snake_case = args[0]
snake_case = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
if text is not None:
snake_case = self.tokenizer(A__ , **A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
snake_case = encodings['''input_ids''']
return inputs
def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
return self.tokenizer.batch_decode(*A__ , **A__ )
def UpperCamelCase ( self , *A__ , **A__ ) -> str:
return self.tokenizer.decode(*A__ , **A__ )
def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]:
return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
| 44
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class _lowercase ( unittest.TestCase ):
def __init__( self , A__ , A__=7 , A__=3 , A__=18 , A__=30 , A__=4_00 , A__=True , A__=None , A__=True , A__=None , A__=True , A__=[0.5, 0.5, 0.5] , A__=[0.5, 0.5, 0.5] , A__=False , ) -> int:
snake_case = size if size is not None else {'''height''': 20, '''width''': 20}
snake_case = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
snake_case = parent
snake_case = batch_size
snake_case = num_channels
snake_case = image_size
snake_case = min_resolution
snake_case = max_resolution
snake_case = do_resize
snake_case = size
snake_case = do_center_crop
snake_case = crop_size
snake_case = do_normalize
snake_case = image_mean
snake_case = image_std
snake_case = do_reduce_labels
def UpperCamelCase ( self ) -> Optional[Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def __UpperCamelCase ( ) ->Tuple:
snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case = Image.open(dataset[0]['''file'''] )
snake_case = Image.open(dataset[1]['''file'''] )
return image, map
def __UpperCamelCase ( ) ->List[str]:
snake_case = load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
snake_case = Image.open(ds[0]['''file'''] )
snake_case = Image.open(ds[1]['''file'''] )
snake_case = Image.open(ds[2]['''file'''] )
snake_case = Image.open(ds[3]['''file'''] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class _lowercase ( __a , unittest.TestCase ):
    """Test suite for BeitImageProcessor: config attributes, from_dict
    overrides, and batched/unbatched preprocessing of PIL / NumPy / PyTorch
    inputs, including integer segmentation maps.

    NOTE(review): assignment targets throughout this class look
    machine-mangled (everything is bound to ``snake_case``); the attribute
    reads below (e.g. ``self.image_processor_tester``, ``image_processing``,
    ``encoded_images``) indicate the intended names — confirm upstream.
    """
    # Class under test; None when vision dependencies are unavailable.
    _UpperCAmelCase = BeitImageProcessor if is_vision_available() else None
    def UpperCamelCase ( self ) -> Optional[int]:
        # setUp: presumably `self.image_processor_tester = ...` — TODO confirm.
        snake_case = BeitImageProcessingTester(self )
    @property
    def UpperCamelCase ( self ) -> Any:
        # Convenience accessor for the tester's image-processor kwargs.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase ( self ) -> List[Any]:
        # The processor must expose all of its documented config attributes.
        snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , '''do_resize''' ) )
        self.assertTrue(hasattr(A__ , '''size''' ) )
        self.assertTrue(hasattr(A__ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(A__ , '''center_crop''' ) )
        self.assertTrue(hasattr(A__ , '''do_normalize''' ) )
        self.assertTrue(hasattr(A__ , '''image_mean''' ) )
        self.assertTrue(hasattr(A__ , '''image_std''' ) )
    def UpperCamelCase ( self ) -> Dict:
        # from_dict honours defaults and explicit size/crop_size/reduce_labels
        # keyword overrides.
        snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''height''': 20, '''width''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        self.assertEqual(image_processor.do_reduce_labels , A__ )
        snake_case = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , crop_size=84 , reduce_labels=A__ )
        self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
        self.assertEqual(image_processor.do_reduce_labels , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally a no-op — presumably disables an inherited test; confirm.
        pass
    def UpperCamelCase ( self ) -> Optional[Any]:
        # PIL inputs: output pixel_values must have the (N, C, crop_h, crop_w) shape.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase ( self ) -> Optional[int]:
        # NumPy inputs: same shape contract as the PIL test above.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase ( self ) -> List[Any]:
        # Torch tensor inputs: same shape contract as the PIL/NumPy tests.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Images + integer segmentation maps: pixel_values keep the crop shape
        # and labels come back as long tensors with values in [0, 255].
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        snake_case = []
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
            maps.append(torch.zeros(image.shape[-2:] ).long() )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , maps[0] , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched
        snake_case = image_processing(A__ , A__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test not batched input (PIL images)
        snake_case , snake_case = prepare_semantic_single_inputs()
        snake_case = image_processing(A__ , A__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                1,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
        # Test batched input (PIL images)
        snake_case , snake_case = prepare_semantic_batch_inputs()
        snake_case = image_processing(A__ , A__ , return_tensors='''pt''' )
        self.assertEqual(
            encoding['''pixel_values'''].shape , (
                2,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(
            encoding['''labels'''].shape , (
                2,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        self.assertEqual(encoding['''labels'''].dtype , torch.long )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
    def UpperCamelCase ( self ) -> Any:
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        snake_case , snake_case = prepare_semantic_single_inputs()
        snake_case = image_processing(A__ , A__ , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 1_50 )
        # With label reduction enabled, labels may use the full [0, 255] range.
        snake_case = True
        snake_case = image_processing(A__ , A__ , return_tensors='''pt''' )
        self.assertTrue(encoding['''labels'''].min().item() >= 0 )
        self.assertTrue(encoding['''labels'''].max().item() <= 2_55 )
| 44
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowercase ( __a ):
    """Decoding granularities for MGP-STR outputs.

    Fix: the original assigned all three member values to the same mangled
    name (`_UpperCAmelCase`), so the `DecodeType.CHARACTER` / `.BPE` /
    `.WORDPIECE` attributes read elsewhere in this module (the supported
    formats tuple and `_decode_helper`'s dispatch) did not exist.
    """
    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
# All supported decode granularities (char, BPE, wordpiece).
# NOTE(review): verify DecodeType actually exposes CHARACTER/BPE/WORDPIECE
# members — the mangled class body above rebinds a single attribute name.
_lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowercase ( __a ):
    """Processor pairing a ViT image processor with three tokenizers
    (character / GPT-2 BPE / BERT wordpiece) for MGP-STR scene-text
    recognition: preprocessing in ``__call__`` and multi-head greedy
    decoding in the batch-decode method.

    NOTE(review): many assignment targets below are machine-mangled (bound
    to ``snake_case``); the attribute reads (``self.char_tokenizer``,
    ``self.bpe_tokenizer``, ``self.wp_tokenizer``, ``inputs``,
    ``encodings``) indicate the intended names — confirm upstream.
    """
    _UpperCAmelCase = ['''image_processor''', '''char_tokenizer''']
    _UpperCAmelCase = '''ViTImageProcessor'''
    _UpperCAmelCase = '''MgpstrTokenizer'''
    def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]:
        # Accept the deprecated `feature_extractor` kwarg as an alias for
        # `image_processor`, then build the auxiliary BPE/wordpiece tokenizers.
        snake_case = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , A__ , )
            snake_case = kwargs.pop('''feature_extractor''' )
        snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        snake_case = tokenizer
        snake_case = AutoTokenizer.from_pretrained('''gpt2''' )
        snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(A__ , A__ )
    def __call__( self , A__=None , A__=None , **A__ ) -> List[str]:
        # Preprocess images and/or text. When both are given, the char-token
        # ids are attached to the image-processor output before returning it
        # (the mangled assignment hides the destination key — confirm upstream).
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ )
        if text is not None:
            snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs
    def UpperCamelCase ( self , A__ ) -> Dict:
        """Decode the (char, bpe, wp) logits triple and keep, per sample, the
        string from the head with the highest cumulative confidence.

        Returns a dict holding the selected strings and scores plus the raw
        per-head strings (the mangled assignments hide the exact keys —
        confirm upstream).
        """
        snake_case , snake_case , snake_case = sequences
        snake_case = char_preds.size(0 )
        snake_case , snake_case = self._decode_helper(A__ , '''char''' )
        snake_case , snake_case = self._decode_helper(A__ , '''bpe''' )
        snake_case , snake_case = self._decode_helper(A__ , '''wp''' )
        snake_case = []
        snake_case = []
        for i in range(A__ ):
            snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]]
            snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]]
            # Pick the head whose cumulative sequence probability is largest.
            snake_case = scores.index(max(A__ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        snake_case = {}
        snake_case = final_strs
        snake_case = final_scores
        snake_case = char_strs
        snake_case = bpe_strs
        snake_case = wp_strs
        return out
    def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]:
        """Greedy-decode one head's logits into strings plus confidences.

        Selects the decoder and EOS marker per format ('char' -> '[s]' /
        id 1, 'bpe' -> '#' / id 2, 'wp' -> '[SEP]' / id 102), truncates each
        prediction at its EOS, and returns (strings, cumulative max-softmax
        probabilities). Raises ValueError for an unknown format.
        """
        if format == DecodeType.CHARACTER:
            snake_case = self.char_decode
            snake_case = 1
            snake_case = '''[s]'''
        elif format == DecodeType.BPE:
            snake_case = self.bpe_decode
            snake_case = 2
            snake_case = '''#'''
        elif format == DecodeType.WORDPIECE:
            snake_case = self.wp_decode
            snake_case = 1_02
            snake_case = '''[SEP]'''
        else:
            raise ValueError(F"""Format {format} is not supported.""" )
        snake_case , snake_case = [], []
        snake_case = pred_logits.size(0 )
        snake_case = pred_logits.size(1 )
        snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ )
        snake_case = preds_index.view(-1 , A__ )[:, 1:]
        snake_case = decoder(A__ )
        snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 )
        snake_case = preds_max_prob[:, 1:]
        for index in range(A__ ):
            # Cut the decoded string at the EOS marker and take the product of
            # per-step max probabilities up to (and including) the EOS position.
            snake_case = preds_str[index].find(A__ )
            snake_case = preds_str[index][:pred_eos]
            snake_case = preds_index[index].cpu().tolist()
            snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1
            snake_case = preds_max_prob[index][: pred_eos_index + 1]
            snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(A__ )
            conf_scores.append(A__ )
        return dec_strs, conf_scores
    def UpperCamelCase ( self , A__ ) -> int:
        # Character-level decode: strip the spaces the tokenizer inserts.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )]
        return decode_strs
    def UpperCamelCase ( self , A__ ) -> List[str]:
        # BPE decode via the GPT-2 tokenizer.
        return self.bpe_tokenizer.batch_decode(A__ )
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        # Wordpiece decode via the BERT tokenizer, with spaces stripped.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )]
        return decode_strs
| 44
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch-size constants; `training_function` below reads MAX_GPU_BATCH_SIZE, so
# these were presumably MAX_GPU_BATCH_SIZE = 16 and EVAL_BATCH_SIZE = 32
# (names are mangled here — confirm against the upstream accelerate example).
_lowercase = 16
_lowercase = 32
def __UpperCamelCase ( a : Accelerator , a : int = 16 ) ->int:
    """Build GLUE MRPC train/eval dataloaders tokenized with bert-base-cased.

    NOTE(review): parameter and local names are machine-mangled (`a` /
    ``snake_case``); the body reads `accelerator`, `datasets`,
    `tokenized_datasets`, `examples` and `batch_size`, which indicates the
    intended names — confirm against the upstream accelerate example.
    """
    snake_case = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    snake_case = load_dataset('''glue''' , '''mrpc''' )
    def tokenize_function(a : Optional[Any] ):
        # max_length=None => use the model max length (it's actually the default)
        snake_case = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=a , max_length=a )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        snake_case = datasets.map(
            a , batched=a , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    snake_case = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(a : Dict ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        snake_case = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            snake_case = 16
        elif accelerator.mixed_precision != "no":
            snake_case = 8
        else:
            snake_case = None
        return tokenizer.pad(
            a , padding='''longest''' , max_length=a , pad_to_multiple_of=a , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    snake_case = DataLoader(
        tokenized_datasets['''train'''] , shuffle=a , collate_fn=a , batch_size=a )
    snake_case = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=a , collate_fn=a , batch_size=a )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders
    # Swap in lightweight mocked dataloaders so tests avoid downloading GLUE.
    # NOTE(review): the rebinding target name is mangled; the F811 suppression
    # suggests it was the dataloader-builder function above — confirm.
    _lowercase = mocked_dataloaders # noqa: F811
def __UpperCamelCase ( a : str , a : List[Any] ) ->str:
    """Train bert-base-cased on GLUE MRPC with Accelerate and print the MRPC
    metric each epoch, manually truncating the duplicated samples in the
    last distributed eval batch (instead of using `gather_for_metrics`).

    NOTE(review): parameter/local names are machine-mangled; reads such as
    `args`, `config`, `model`, `optimizer`, `lr_scheduler` and
    `eval_dataloader` reveal the intended names — confirm upstream.
    """
    # For testing only
    if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , a ) == "1":
        snake_case = 2
    # Initialize accelerator
    snake_case = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    snake_case = config['''lr''']
    snake_case = int(config['''num_epochs'''] )
    snake_case = int(config['''seed'''] )
    snake_case = int(config['''batch_size'''] )
    snake_case = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    snake_case = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        snake_case = batch_size // MAX_GPU_BATCH_SIZE
        snake_case = MAX_GPU_BATCH_SIZE
    set_seed(a )
    snake_case , snake_case = get_dataloaders(a , a )
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    snake_case = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=a )
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    snake_case = model.to(accelerator.device )
    # Instantiate optimizer
    snake_case = AdamW(params=model.parameters() , lr=a )
    # Instantiate scheduler
    snake_case = get_linear_schedule_with_warmup(
        optimizer=a , num_warmup_steps=100 , num_training_steps=(len(a ) * num_epochs) // gradient_accumulation_steps , )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    snake_case , snake_case , snake_case , snake_case , snake_case = accelerator.prepare(
        a , a , a , a , a )
    # Now we train the model
    for epoch in range(a ):
        model.train()
        for step, batch in enumerate(a ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            snake_case = model(**a )
            snake_case = outputs.loss
            snake_case = loss / gradient_accumulation_steps
            accelerator.backward(a )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        snake_case = 0
        for step, batch in enumerate(a ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                snake_case = model(**a )
            snake_case = outputs.logits.argmax(dim=-1 )
            snake_case , snake_case = accelerator.gather((predictions, batch['''labels''']) )
            # New Code #
            # First we check if it's a distributed system
            if accelerator.use_distributed:
                # Then see if we're on the last batch of our eval dataloader
                if step == len(a ) - 1:
                    # Last batch needs to be truncated on distributed systems as it contains additional samples
                    snake_case = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    snake_case = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    # Otherwise we add the number of samples seen
                    samples_seen += references.shape[0]
            # All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
            # accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=a , references=a , )
        snake_case = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""" , a )
def __UpperCamelCase ( ) ->Tuple:
    """CLI entry point: parse --mixed_precision/--cpu and launch training
    with fixed MRPC hyper-parameters (lr 2e-5, 3 epochs, batch size 16)."""
    snake_case = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=a , default=a , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    snake_case = parser.parse_args()
    snake_case = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    # NOTE(review): `a` is undefined at this point — the mangled names
    # presumably stand for `config, args`; confirm against upstream.
    training_function(a , a )
if __name__ == "__main__":
    # NOTE(review): no `main` is defined above under that name (mangling) — confirm.
    main()
| 44
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# Module-level tri-state flags (names mangled away); all initialised to False.
_lowercase , _lowercase , _lowercase = False, False, False
@dataclass
class _lowercase :
    """Datasets `Audio`-style feature: encodes audio examples into
    {'bytes', 'path'} structs for Arrow storage and decodes them back into
    {'path', 'array', 'sampling_rate'} dicts via soundfile/librosa.

    NOTE(review): the field and local names are machine-mangled; attribute
    reads below (`self.decode`, `self.mono`, `self.sampling_rate`,
    `self.pa_type`) reveal the intended field names — confirm upstream.
    """
    _UpperCAmelCase = None
    _UpperCAmelCase = True
    _UpperCAmelCase = True
    _UpperCAmelCase = None
    # Automatically constructed
    _UpperCAmelCase = "dict"
    _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a )
    def __call__( self ) -> Optional[Any]:
        # The feature's Arrow storage type.
        return self.pa_type
    def UpperCamelCase ( self , A__ ) -> dict:
        """Encode one example into the {'bytes', 'path'} storage dict.

        Accepts a path string, raw bytes, or a dict carrying either an
        'array' + 'sampling_rate' pair, a local 'path' (PCM files are
        converted to WAV), or pre-encoded 'bytes'/'path' fields. Raises
        ImportError without soundfile and ValueError on malformed input.
        """
        try:
            import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(A__ , A__ ):
            return {"bytes": None, "path": value}
        elif isinstance(A__ , A__ ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            snake_case = BytesIO()
            sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    # NOTE(review): `np.intaa`/`np.floataa` and 3_27_67 look like mangled
                    # np.int16 / np.float32 and 32767 — confirm before relying on this path.
                    snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
                else:
                    snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
                snake_case = BytesIO(bytes() )
                sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def UpperCamelCase ( self , A__ , A__ = None ) -> dict:
        """Decode a stored {'bytes', 'path'} example into
        {'path', 'array', 'sampling_rate'}, downmixing to mono and
        resampling when the feature requests it. Raises when decoding is
        disabled, dependencies are missing, or both bytes and path are None.
        """
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
        snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
        snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        if file is None:
            # Remote path: resolve a per-repo auth token (if any) before opening.
            snake_case = token_per_repo_id or {}
            snake_case = path.split('''::''' )[-1]
            try:
                snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id''']
                snake_case = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                snake_case = None
            with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f:
                snake_case , snake_case = sf.read(A__ )
        else:
            snake_case , snake_case = sf.read(A__ )
        snake_case = array.T
        if self.mono:
            snake_case = librosa.to_mono(A__ )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate )
            snake_case = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        # Flattened (raw) representation; only valid when decoding is off.
        from .features import Value
        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''' )
        return {
            "bytes": Value('''binary''' ),
            "path": Value('''string''' ),
        }
    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Cast arbitrary Arrow storage (string paths, binary blobs, array
        structs, or bytes/path structs) to this feature's struct type."""
        if pa.types.is_string(storage.type ):
            snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
            snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                snake_case = storage.field('''bytes''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                snake_case = storage.field('''path''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        return array_cast(A__ , self.pa_type )
    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Embed external files into storage: read every referenced path's
        bytes and keep only the basename in 'path'."""
        @no_op_if_value_is_null
        def path_to_bytes(A__ ):
            with xopen(A__ , '''rb''' ) as f:
                snake_case = f.read()
            return bytes_
        snake_case = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        snake_case = pa.array(
            [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(A__ , self.pa_type )
| 44
| 1
|
'''simple docstring'''
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
_lowercase = datasets.logging.get_logger(__name__)
_lowercase = '\\n@InProceedings{moosavi2019minimum,\n author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},\n title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},\n year = {2019},\n booktitle = {Proceedings of the 57th Annual Meeting of\n the Association for Computational Linguistics (Volume 1: Long Papers)},\n publisher = {Association for Computational Linguistics},\n address = {Florence, Italy},\n}\n\n@inproceedings{10.3115/1072399.1072405,\nauthor = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},\ntitle = {A Model-Theoretic Coreference Scoring Scheme},\nyear = {1995},\nisbn = {1558604022},\npublisher = {Association for Computational Linguistics},\naddress = {USA},\nurl = {https://doi.org/10.3115/1072399.1072405},\ndoi = {10.3115/1072399.1072405},\nbooktitle = {Proceedings of the 6th Conference on Message Understanding},\npages = {45–52},\nnumpages = {8},\nlocation = {Columbia, Maryland},\nseries = {MUC6 ’95}\n}\n\n@INPROCEEDINGS{Bagga98algorithmsfor,\n author = {Amit Bagga and Breck Baldwin},\n title = {Algorithms for Scoring Coreference Chains},\n booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},\n year = {1998},\n pages = {563--566}\n}\n\n@INPROCEEDINGS{Luo05oncoreference,\n author = {Xiaoqiang Luo},\n title = {On coreference resolution performance metrics},\n booktitle = {In Proc. of HLT/EMNLP},\n year = {2005},\n pages = {25--32},\n publisher = {URL}\n}\n\n@inproceedings{moosavi-strube-2016-coreference,\n title = "Which Coreference Evaluation Metric Do You Trust? 
A Proposal for a Link-based Entity Aware Metric",\n author = "Moosavi, Nafise Sadat and\n Strube, Michael",\n booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",\n month = aug,\n year = "2016",\n address = "Berlin, Germany",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/P16-1060",\n doi = "10.18653/v1/P16-1060",\n pages = "632--642",\n}\n\n'
_lowercase = '\\nCoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which\nimplements of the common evaluation metrics including MUC [Vilain et al, 1995],\nB-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],\nLEA [Moosavi and Strube, 2016] and the averaged CoNLL score\n(the average of the F1 values of MUC, B-cubed and CEAFe)\n[Denis and Baldridge, 2009a; Pradhan et al., 2011].\n\nThis wrapper of CoVal currently only work with CoNLL line format:\nThe CoNLL format has one word per line with all the annotation for this word in column separated by spaces:\nColumn Type Description\n1 Document ID This is a variation on the document filename\n2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.\n3 Word number\n4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.\n5 Part-of-Speech\n6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.\n7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"\n8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.\n9 Word sense This is the word sense of the word in Column 3.\n10 Speaker/Author This is the speaker or author name where available. 
Mostly in Broadcast Conversation and Web Log data.\n11 Named Entities These columns identifies the spans representing various named entities.\n12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.\nN Coreference Coreference chain information encoded in a parenthesis structure.\nMore informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html\n\nDetails on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md\n\nCoVal code was written by @ns-moosavi.\nSome parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py\nThe test suite is taken from https://github.com/conll/reference-coreference-scorers/\nMention evaluation and the test suite are added by @andreasvc.\nParsing CoNLL files is developed by Leo Born.\n'
_lowercase = '\nCalculates coreference evaluation metrics.\nArgs:\n predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.\n Each prediction is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.\n Each reference is a word with its annotations as a string made of columns joined with spaces.\n Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)\n See the details on the format in the description of the metric.\n keep_singletons: After extracting all mentions of key or system files,\n mentions whose corresponding coreference chain is of size one,\n are considered as singletons. The default evaluation mode will include\n singletons in evaluations if they are included in the key or the system files.\n By setting \'keep_singletons=False\', all singletons in the key and system files\n will be excluded from the evaluation.\n NP_only: Most of the recent coreference resolvers only resolve NP mentions and\n leave out the resolution of VPs. 
By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.\n min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.\n Minimum spans are determined using the MINA algorithm.\n\nReturns:\n \'mentions\': mentions\n \'muc\': MUC metric [Vilain et al, 1995]\n \'bcub\': B-cubed [Bagga and Baldwin, 1998]\n \'ceafe\': CEAFe [Luo et al., 2005]\n \'lea\': LEA [Moosavi and Strube, 2016]\n \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)\n\nExamples:\n\n >>> coval = datasets.load_metric(\'coval\')\n >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',\n ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',\n ... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',\n ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',\n ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',\n ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']\n >>> references = [words]\n >>> predictions = [words]\n >>> results = coval.compute(predictions=predictions, references=references)\n >>> print(results) # doctest:+ELLIPSIS\n {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}\n'
def __UpperCamelCase(
    key_lines,
    sys_lines,
    NP_only=False,
    remove_nested=False,
    keep_singletons=True,
    min_span=False,
    doc="dummy_doc",
):
    """Build per-document coreference info from key (gold) and system CoNLL lines.

    The original obfuscation gave every parameter the same name ``a`` (a
    SyntaxError) while the body kept the real names; this restores the
    signature the body actually uses.

    Returns a dict mapping ``doc`` to
    (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster).
    NOTE(review): ``reader`` and ``logger`` come from module-level imports not
    visible in this chunk.
    """
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        # parse trees are taken from the key (gold) annotation for both sides
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            '''Number of removed nested coreferring mentions in the key '''
            f"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
        logger.info(
            '''Number of resulting singleton clusters in the key '''
            f"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
    if not keep_singletons:
        logger.info(
            f"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
            '''files, respectively''' )
    return doc_coref_infos
def __UpperCamelCase(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    """Score system output against the key with each coreference metric.

    ``metrics`` is a list of (name, metric_fn) pairs; the averaged CoNLL score
    is added when exactly MUC, B-cubed and CEAFe were evaluated.  The
    obfuscation had collapsed all seven parameters into duplicate ``a`` names
    (a SyntaxError); the body's own references fix the intended order.
    NOTE(review): ``get_coref_infos``, ``evaluator`` and ``logger`` are
    module-level names defined elsewhere in the file.
    """
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"""{name}/recall""": recall, f"""{name}/precision""": precision, f"""{name}/f1""": f1})

        logger.info(
            name.ljust(10), f"""Recall: {recall * 100:.2f}""", f""" Precision: {precision * 100:.2f}""", f""" F1: {f1 * 100:.2f}""", )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"""CoNLL score: {conll:.2f}""")
        output_scores.update({'''conll_score''': conll})

    return output_scores
def __UpperCamelCase ( a : Optional[int] ) ->Tuple:
snake_case = False
for line in key_lines:
if not line.startswith('''#''' ):
if len(line.split() ) > 6:
snake_case = line.split()[5]
if not parse_col == "-":
snake_case = True
break
else:
break
return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class _lowercase(datasets.Metric):
    """CoVal coreference-resolution metric (mentions, MUC, B-cubed, CEAFe, LEA).

    NOTE(review): both methods were obfuscated to the same name
    ``UpperCamelCase`` (originally ``_info`` and ``_compute``); the names are
    kept, but the duplicate ``A__`` parameters of the compute method — a
    SyntaxError — are restored to the names its body actually uses.
    """

    def UpperCamelCase(self):
        # Metric metadata: both inputs are sequences of CoNLL-formatted lines.
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''string''')),
                    '''references''': datasets.Sequence(datasets.Value('''string''')),
                }), codebase_urls=['''https://github.com/ns-moosavi/coval'''], reference_urls=[
                '''https://github.com/ns-moosavi/coval''',
                '''https://www.aclweb.org/anthology/P16-1060''',
                '''http://www.conll.cemantix.org/2012/data.html''',
            ], )

    def UpperCamelCase(self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False):
        """Compute all CoVal sub-metrics; references are the key (gold) lines."""
        coval_metrics = [
            ('''mentions''', evaluator.mentions),
            ('''muc''', evaluator.muc),
            ('''bcub''', evaluator.b_cubed),
            ('''ceafe''', evaluator.ceafe),
            ('''lea''', evaluator.lea),
        ]

        if min_span:
            # minimum-span scoring needs gold parse trees in the key file
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError('''References should have gold parse annotation to use \'min_span\'.''')
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references, sys_lines=predictions, metrics=coval_metrics, NP_only=NP_only, remove_nested=remove_nested, keep_singletons=keep_singletons, min_span=min_span, )
        return score
| 44
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowercase :
@staticmethod
def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
pass
def __UpperCamelCase ( a : Image ) ->str:
snake_case = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase(unittest.TestCase):
    """Pipeline tests for depth estimation.

    The obfuscation gave several methods duplicate ``A__`` parameters (a
    SyntaxError) while bodies kept the real local names (``depth_estimator``,
    ``outputs``); both are restored here.  Method names stay as obfuscated.
    """

    # architectures covered by this pipeline test
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def UpperCamelCase(self, model, tokenizer, processor):
        """Build the pipeline under test plus a pair of example image paths."""
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def UpperCamelCase(self, depth_estimator, examples):
        """Run the pipeline on single and batched inputs of varied image modes."""
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ], outputs, )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def UpperCamelCase(self):
        pass

    @slow
    @require_torch
    def UpperCamelCase(self):
        """Slow integration test against the Intel/dpt-large checkpoint."""
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''', model=model_id)
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        outputs['''depth'''] = hashimage(outputs['''depth'''])
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()), 2.662)

    @require_torch
    def UpperCamelCase(self):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 44
| 1
|
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class _lowercase :
def __init__( self , A__=2 , A__=3 , A__=64 , A__=None ) -> Dict:
snake_case = np.random.default_rng(A__ )
snake_case = length
snake_case = rng.normal(size=(length,) ).astype(np.floataa )
snake_case = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> str:
return self.length
def __getitem__( self , A__ ) -> Optional[Any]:
return {"x": self.x[i], "y": self.y[i]}
class _lowercase ( torch.nn.Module ):
def __init__( self , A__=0 , A__=0 , A__=False ) -> str:
super().__init__()
snake_case = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case = True
def UpperCamelCase ( self , A__=None ) -> Tuple:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case = False
return x * self.a[0] + self.b[0]
class _lowercase ( torch.nn.Module ):
def __init__( self , A__=0 , A__=0 , A__=False ) -> Optional[int]:
super().__init__()
snake_case = torch.nn.Parameter(torch.tensor(A__ ).float() )
snake_case = torch.nn.Parameter(torch.tensor(A__ ).float() )
snake_case = True
def UpperCamelCase ( self , A__=None ) -> List[str]:
if self.first_batch:
print(F"""Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}""" )
snake_case = False
return x * self.a + self.b
def __UpperCamelCase(accelerator, batch_size: int = 16):
    """Build tiny MRPC train/eval dataloaders from bundled CSV fixtures.

    The obfuscation duplicated both parameters as ``a`` (a SyntaxError); the
    collate function's use of ``accelerator.distributed_type`` pins the first
    name.  ``batch_size`` is accepted for API compatibility but the mocked
    loaders use fixed sizes 2 and 1.
    """
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''')
    data_files = {'''train''': '''tests/test_samples/MRPC/train.csv''', '''validation''': '''tests/test_samples/MRPC/dev.csv'''}
    datasets = load_dataset('''csv''', data_files=data_files)
    label_list = datasets['''train'''].unique('''label''')

    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['''sentence1'''], examples['''sentence2'''], truncation=True, max_length=None, padding='''max_length''')
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples['''label''']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['''sentence1''', '''sentence2''', '''label'''], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='''max_length''', max_length=128, return_tensors='''pt''')
        return tokenizer.pad(examples, padding='''longest''', return_tensors='''pt''')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['''train'''], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets['''validation'''], shuffle=False, collate_fn=collate_fn, batch_size=1)

    return train_dataloader, eval_dataloader
| 44
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __UpperCamelCase ( a : Optional[int] ) ->Dict:
snake_case = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(a , a )
def __UpperCamelCase ( a : Optional[Any] ) ->int:
snake_case = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
snake_case = s_dict.pop(a )
elif "subsample" in key:
snake_case = s_dict.pop(a )
def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]:
snake_case , snake_case = emb.weight.shape
snake_case = nn.Linear(a , a , bias=a )
snake_case = emb.weight.data
return lin_layer
def __UpperCamelCase(checkpoint_path, pytorch_dump_folder_path):
    """Convert a fairseq Speech2Text checkpoint to the HF format and save it.

    Restores the duplicated ``a`` parameters (SyntaxError), fixes the
    ``int(a)`` bug in the kernel-size comprehension (the loop variable is
    ``i``), and restores the boolean/None flags lost by obfuscation
    (``strict=False``, ``use_cache=True``, ``early_stopping=True``).
    """
    m2m_100 = torch.load(checkpoint_path, map_location='''cpu''')
    args = m2m_100['''args''']
    state_dict = m2m_100['''model''']
    lm_head_weights = state_dict['''decoder.output_projection.weight''']

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    tie_embeds = args.share_decoder_input_output_embed

    # kernel sizes come as a comma-separated string, e.g. "5,5"
    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(''',''')]
    config = SpeechaTextConfig(
        vocab_size=vocab_size, max_source_positions=args.max_source_positions, max_target_positions=args.max_target_positions, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='''relu''', num_conv_layers=len(conv_kernel_sizes), conv_channels=args.conv_channels, conv_kernel_sizes=conv_kernel_sizes, input_feat_per_channel=args.input_feat_per_channel, input_channels=args.input_channels, tie_word_embeddings=tie_embeds, num_beams=5, max_length=200, use_cache=True, decoder_start_token_id=2, early_stopping=True, )

    model = SpeechaTextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""")

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): obfuscation renamed the parser/args variables to
    # `_lowercase`, but the lines below still reference `parser`, `args` and
    # `convert_fairseq_sat_checkpoint_to_tfms` — none of which exist under
    # these names in this file. This entry point is broken as written.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
def __UpperCamelCase ( a : dict , a : str , a : set , a : set , a : dict , a : dict , a : PriorityQueue , a : dict , a : float | int , ) ->float | int:
for nxt, d in graph[v]:
if nxt in visited_forward:
continue
snake_case = cst_fwd.get(a , np.inf )
snake_case = cst_fwd[v] + d
if new_cost_f < old_cost_f:
queue.put((new_cost_f, nxt) )
snake_case = new_cost_f
snake_case = v
if nxt in visited_backward:
if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
snake_case = cst_fwd[v] + d + cst_bwd[nxt]
return shortest_distance
def __UpperCamelCase(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    """Bidirectional Dijkstra shortest path length; returns -1 if unreachable.

    Expands one node from each frontier per iteration and stops once the two
    settled frontiers can no longer improve on the best meeting distance.
    The obfuscated signature duplicated all four parameters as ``a``
    (a SyntaxError); names restored from the body.
    NOTE(review): ``pass_and_relaxation`` is the relaxation helper defined
    earlier in this file.
    """
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance, )
        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance, )

        # termination: frontiers can no longer improve the best meeting point
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
# Example forward adjacency list: node -> [[neighbor, edge_weight], ...].
# NOTE(review): obfuscation assigned BOTH graphs to the same name `_lowercase`,
# so the second assignment overwrites the first.
_lowercase = {
    'B': [['C', 1]],
    'C': [['D', 1]],
    'D': [['F', 1]],
    'E': [['B', 1], ['G', 2]],
    'F': [],
    'G': [['F', 1]],
}
# Reverse adjacency list for the backward search of bidirectional Dijkstra.
_lowercase = {
    'B': [['E', 1]],
    'C': [['B', 1]],
    'D': [['C', 1]],
    'F': [['D', 1], ['G', 1]],
    'E': [[None, np.inf]],
    'G': [['E', 2]],
}
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase(metaclass=__a):
    """Import-time placeholder raising a clear error when the transformers,
    torch or note_seq backends are missing.

    The obfuscated ``*A__, **A__`` duplicated the argument name in every
    method — a SyntaxError — fixed here with standard ``*args, **kwargs``.
    """

    _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def UpperCamelCase(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])

    # NOTE(review): originally `from_config` and `from_pretrained`; obfuscation
    # collapsed both onto the same name, so the second definition shadows the first.
    @classmethod
    def UpperCamelCase(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
| 44
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __UpperCamelCase(subparsers=None):
    """Build the `accelerate config` argument parser with its subcommands.

    Restores the booleans lost by obfuscation (the parent parser is created
    with ``add_help=False, allow_abbrev=False`` so it can be shared) and the
    subparsers object threaded into the child command parsers.
    """
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title='''subcommands''', dest='''subcommand''')

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser
def __UpperCamelCase():
    """CLI entry point: parse args and dispatch to the selected subcommand.

    The obfuscated body tested ``hasattr(a, 'func')`` with ``a`` undefined;
    the parsed namespace is what must carry the dispatch function.
    NOTE(review): ``get_config_parser`` is the parser factory defined just
    above in this file (its def name was obfuscated).
    """
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, '''func'''):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
| 44
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = value
snake_case = None
snake_case = None
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = tree
def UpperCamelCase ( self , A__ ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-module import structure for the MRA model.  Obfuscation had renamed
# the structure dict (and the modeling list's insertion) to `_lowercase`,
# leaving the `_import_structure` reference at the bottom undefined.
_import_structure = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: modeling classes stay out of the lazy structure
    pass
else:
    _import_structure['modeling_mra'] = [
        'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MraForMaskedLM',
        'MraForMultipleChoice',
        'MraForQuestionAnswering',
        'MraForSequenceClassification',
        'MraForTokenClassification',
        'MraLayer',
        'MraModel',
        'MraPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mra import (
            MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
            MraLayer,
            MraModel,
            MraPreTrainedModel,
        )
else:
    import sys

    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 44
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_lowercase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __UpperCamelCase ( a : List[str] ) ->Optional[int]:
snake_case = torch.load(a , map_location='''cpu''' )
return sd
def __UpperCamelCase(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rewrite a VisualBERT checkpoint dict to the HF key naming.

    Obfuscation duplicated the parameters as ``a`` (a SyntaxError) and lost
    the dict-key assignment targets, so every rewritten entry was discarded;
    both are restored.
    NOTE(review): the default references the module-level ``rename_keys_prefix``
    pair list, which obfuscation renamed to ``_lowercase`` — the default will
    not resolve in this file as it stands.
    """
    new_d = OrderedDict()
    # position_ids buffer is recreated rather than copied from the checkpoint
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def __UpperCamelCase(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT ``.th`` checkpoint to the HF format.

    Infers model type and config from the checkpoint filename, rewrites the
    state dict, loads it into the matching HF architecture and saves it.
    The obfuscation duplicated both parameters as ``a`` (a SyntaxError) and
    dropped the ``model_type``/``config_params`` assignment targets.
    """
    assert (
        checkpoint_path.split('''/''')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # NOTE(review): obfuscation renamed the parser/args variables to
    # `_lowercase`, but the lines below still reference `parser`, `args` and
    # `convert_visual_bert_checkpoint` — none of which exist under these
    # names in this file. This entry point is broken as written.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase(__a):
    """Generic feature extractor for sequence inputs (speech/audio): handles
    padding, truncation and attention-mask creation for batches of features.

    Obfuscation gave every method signature duplicate ``A__`` parameters
    (a SyntaxError) while the bodies still call ``self._pad``,
    ``self._truncate`` and ``self._get_padding_strategies``; signatures and
    helper method names are restored so those internal calls resolve.  The
    public padding entry point keeps the obfuscated name ``UpperCamelCase``.
    """

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop('''padding_side''', '''right''')
        self.return_attention_mask = kwargs.pop('''return_attention_mask''', True)

        super().__init__(**kwargs)

    def UpperCamelCase(self, processed_features, padding=True, max_length=None, truncation=False, pad_to_multiple_of=None, return_attention_mask=None, return_tensors=None) -> BatchFeature:
        """Pad (and optionally truncate) a batch of extracted features."""
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F""" to this method that includes {self.model_input_names[0]}, but you provided"""
                F""" {list(processed_features.keys() )}""")

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            # empty batch: nothing to pad
            if return_attention_mask:
                processed_features['''attention_mask'''] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = '''tf'''
            elif is_torch_tensor(first_element):
                return_tensors = '''pt'''
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = '''np'''
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(first_element )}. """
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''')

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''')

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation, )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i], max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                # keep float features in float32 (float64 would double memory)
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(self, processed_features, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None) -> dict:
        """Pad a single example's features up to ``max_length``."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features['''attention_mask'''] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''], (0, difference))
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, '''constant''', constant_values=self.padding_value)
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features['''attention_mask'''] = np.pad(
                        processed_features['''attention_mask'''], (difference, 0))
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, '''constant''', constant_values=self.padding_value)
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))

        return processed_features

    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        """Truncate a single example's features down to ``max_length``."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''')

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features['''attention_mask'''] = processed_features['''attention_mask'''][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Normalize the user-facing ``padding`` argument to a PaddingStrategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""")

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''')

        return padding_strategy
| 44
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase(key, offset, original_name, new_name):
    """Rewrite one PoolFormer state-dict key into HF `block.{n}.{layer}` form.

    Fixes the original signature, whose four parameters were all named ``a``
    (a SyntaxError) while the body read ``key``/``offset``/``original_name``/
    ``new_name``.  (Type annotations dropped: ``Dict``/``Optional`` are not
    imported in this file.)

    Args:
        key: full parameter key, e.g. ``"...{block}.{layer}.mlp.fc1.weight"``.
        offset: number of patch-embedding layers to subtract from the block index.
        original_name: dotted name to replace (e.g. ``"mlp.fc1"``).
        new_name: replacement dotted name (e.g. ``"output.conv1"``).

    Returns:
        The rewritten key.
    """
    # The block/layer indices sit immediately before the first component of
    # `original_name` in the dotted key.
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"""{orig_block_num}.{layer_num}.{original_name}""", f"""block.{new_block_num}.{layer_num}.{new_name}"""
    )
    return key


# Bind the name used by the rest of this conversion script.
replace_key_with_offset = __UpperCamelCase
def __UpperCamelCase(state_dict):
    """Rename original PoolFormer checkpoint keys to HuggingFace naming.

    Fixes the original body, which read undefined names (``state_dict``,
    ``patch_emb_offset``, ``total_embed_found``, ``key``, ``new_state_dict``)
    because every assignment target had been mangled to ``snake_case``, and
    called an undefined ``replace_key_with_offset`` (inlined here as a nested
    helper so the function is self-contained).

    Args:
        state_dict: mapping of original parameter names to tensors.

    Returns:
        ``OrderedDict`` with HF-style keys, values unchanged.
    """

    def _replace_key_with_offset(key, offset, original_name, new_name):
        # Shift the block index by `offset` and swap the inner module name.
        to_find = original_name.split(".")[0]
        key_list = key.split(".")
        orig_block_num = int(key_list[key_list.index(to_find) - 2])
        layer_num = int(key_list[key_list.index(to_find) - 1])
        new_block_num = orig_block_num - offset
        return key.replace(
            f"""{orig_block_num}.{layer_num}.{original_name}""", f"""block.{new_block_num}.{layer_num}.{new_name}"""
        )

    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"""patch_embeddings.{total_embed_found}.""")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = _replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict


# Bind the name used by the conversion entry point below.
rename_keys = __UpperCamelCase
def __UpperCamelCase():
    """Download and return the standard COCO cats test image (PIL Image).

    Fixes the original call ``requests.get(a, stream=a)``, which passed the
    URL string as the ``stream`` flag; the flag should simply be ``True`` so
    the raw response body can be handed to ``Image.open``.
    """
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image


# Bind the name used by the conversion entry point below.
prepare_img = __UpperCamelCase
@torch.no_grad()
def __UpperCamelCase(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HF format and save it.

    Fixes the original signature, whose three parameters were all named ``a``
    (a SyntaxError), and restores the local/config names that had been mangled
    to ``snake_case`` (the body read many undefined names as a result).

    Args:
        model_name: e.g. ``"poolformer_s12"``; the last 3 chars select the size.
        checkpoint_path: path to the original ``.pth`` state dict.
        pytorch_dump_folder_path: output folder for model + image processor.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes (label maps come from the HF hub dataset repo)
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    # NOTE(review): relies on a module-level `logger` binding — verify (the
    # logger above is assigned to `_lowercase`).
    logger.info(f"""Converting model {model_name}...""")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"""Size {size} not supported""")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Bind the name used by the CLI entry point below.
convert_poolformer_checkpoint = __UpperCamelCase
if __name__ == "__main__":
    # CLI entry point for the PoolFormer checkpoint conversion script.
    # NOTE(review): this block relies on module-level `parser`, `args`, and
    # `convert_poolformer_checkpoint` bindings; the parser and parsed args are
    # assigned to `_lowercase` here, which looks like a mangled rename —
    # verify the intended names before running.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    _lowercase = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline  # noqa: F401
# Emit a deprecation notice at import time; judging by the arguments,
# `standard_warn=False` keeps the custom message verbatim and `stacklevel=3`
# attributes the warning to the importing caller rather than this shim —
# confirm against diffusers' `deprecate` helper.
deprecate(
    'stable diffusion controlnet',
    '0.22.0',
    'Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.',
    standard_warn=False,
    stacklevel=3,
)
| 44
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Absolute paths of the Flax example-script folders, so the run_* modules can
# be imported directly.
_lowercase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
# NOTE(review): `SRC_DIRS` is read below but the list above is bound to
# `_lowercase` — looks like a mangled rename; verify the intended name.
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    # Import the example entry points only once the folders are on sys.path.
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): the root logger is bound to `_lowercase` here but used as
# `logger` further down — verify.
_lowercase = logging.getLogger()
def __UpperCamelCase():
    """Return the value of the ``-f`` command-line flag (or None if absent).

    Fixes the original, whose ``-> Tuple`` return annotation raised NameError
    at definition time (``Tuple`` is not imported in this file) and whose body
    read an undefined ``args`` local.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def __UpperCamelCase(output_dir, split="eval"):
    """Load ``{split}_results.json`` from an example-run output directory.

    Fixes the original signature, whose two parameters were both named ``a``
    (a SyntaxError) while the body read ``output_dir`` and ``split``.

    Args:
        output_dir: directory the example script wrote its results to.
        split: results-file prefix (``"eval"`` or ``"test"``).

    Returns:
        The parsed JSON results dict.

    Raises:
        ValueError: if the results file does not exist.
    """
    path = os.path.join(output_dir, f"""{split}_results.json""")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""")


# Bind the name used by the test class below.
get_results = __UpperCamelCase
# Mirror log output to stdout so pytest captures it.
# NOTE(review): the handler is bound to `_lowercase` but used as
# `stream_handler`, and `logger` is read here as well — both look like
# mangled renames; verify the intended names.
_lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase ( __a ):
    """End-to-end smoke tests that run each Flax example script on tiny fixtures.

    NOTE(review): all test methods share the mangled name ``UpperCamelCase``
    (so only the last definition is visible to the unittest runner) and the
    base class ``__a`` is not defined in this file — both look like artifacts
    of an automated rename; the bodies below have been repaired to use
    consistent local names (``tmp_dir``/``testargs``/``result``) and to patch
    ``sys.argv`` as the originals intended.
    """

    def UpperCamelCase(self) -> None:
        """run_flax_glue on MRPC fixtures; eval accuracy should reach 0.75."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def UpperCamelCase(self) -> None:
        """run_clm_flax on tiny text fixtures; perplexity should stay < 100."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 100)

    @slow
    def UpperCamelCase(self) -> None:
        """run_summarization_flax on xsum samples; check ROUGE floors on test split."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
        result = get_results(tmp_dir, split="test")
        self.assertGreaterEqual(result["test_rouge1"], 10)
        self.assertGreaterEqual(result["test_rouge2"], 2)
        self.assertGreaterEqual(result["test_rougeL"], 7)
        self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def UpperCamelCase(self) -> None:
        """run_mlm_flax on tiny text fixtures; perplexity should stay < 42."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertLess(result["eval_perplexity"], 42)

    @slow
    def UpperCamelCase(self) -> None:
        """run_ta_mlm_flax (T5 span MLM) on tiny text fixtures; accuracy >= 0.42."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def UpperCamelCase(self) -> None:
        """run_flax_ner on CoNLL samples; accuracy >= 0.75, f1 >= 0.3."""
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def UpperCamelCase(self) -> None:
        """run_qa on SQuAD v2 samples; f1 >= 30, exact >= 30."""
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_f1"], 30)
        self.assertGreaterEqual(result["eval_exact"], 30)
| 44
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _lowercase ( __a , unittest.TestCase ):
    """Fast (dummy-weights, CPU) tests for KandinskyVaaControlnetImgaImgPipeline.

    NOTE(review): every class attribute below is bound to the same mangled name
    ``_UpperCAmelCase`` (only the last assignment survives) and all methods share
    the name ``UpperCamelCase``; likewise many locals were renamed to
    ``snake_case`` and then read under their original names (``model``,
    ``unet``, ``inputs``, ...), and several call sites pass an unbound ``A__``.
    The original identifiers appear lost — confirm against the upstream
    diffusers test file before relying on this class.
    """

    _UpperCAmelCase = KandinskyVaaControlnetImgaImgPipeline
    _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _UpperCAmelCase = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint''']
    _UpperCAmelCase = [
        '''generator''',
        '''height''',
        '''width''',
        '''strength''',
        '''guidance_scale''',
        '''num_inference_steps''',
        '''return_dict''',
        '''guidance_scale''',
        '''num_images_per_prompt''',
        '''output_type''',
        '''return_dict''',
    ]
    _UpperCAmelCase = False

    @property
    def UpperCamelCase ( self ) -> Any:
        # Text-embedder hidden size used by the dummy models below.
        return 32

    @property
    def UpperCamelCase ( self ) -> Optional[int]:
        return 32

    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        return self.time_input_dim

    @property
    def UpperCamelCase ( self ) -> List[Any]:
        return self.time_input_dim * 4

    @property
    def UpperCamelCase ( self ) -> List[str]:
        return 1_00

    @property
    def UpperCamelCase ( self ) -> int:
        # Build a tiny UNet with deterministic initialisation.
        torch.manual_seed(0 )
        snake_case = {
            '''in_channels''': 8,
            # Out channels is double in channels because predicts mean and variance
            '''out_channels''': 8,
            '''addition_embed_type''': '''image_hint''',
            '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
            '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
            '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
            '''layers_per_block''': 1,
            '''encoder_hid_dim''': self.text_embedder_hidden_size,
            '''encoder_hid_dim_type''': '''image_proj''',
            '''cross_attention_dim''': self.cross_attention_dim,
            '''attention_head_dim''': 4,
            '''resnet_time_scale_shift''': '''scale_shift''',
            '''class_embed_type''': None,
        }
        # NOTE(review): `A__` is unbound here — presumably the kwargs dict
        # above was meant to be passed; `model` is likewise unbound.
        snake_case = UNetaDConditionModel(**A__ )
        return model

    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Constructor kwargs for the tiny VQ decoder (movq).
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        torch.manual_seed(0 )
        snake_case = VQModel(**self.dummy_movq_kwargs )
        return model

    def UpperCamelCase ( self ) -> Any:
        # Assemble the pipeline components (unet, DDIM scheduler, movq).
        snake_case = self.dummy_unet
        snake_case = self.dummy_movq
        snake_case = {
            '''num_train_timesteps''': 10_00,
            '''beta_schedule''': '''linear''',
            '''beta_start''': 0.0_0_0_8_5,
            '''beta_end''': 0.0_1_2,
            '''clip_sample''': False,
            '''set_alpha_to_one''': False,
            '''steps_offset''': 0,
            '''prediction_type''': '''epsilon''',
            '''thresholding''': False,
        }
        snake_case = DDIMScheduler(**A__ )
        snake_case = {
            '''unet''': unet,
            '''scheduler''': scheduler,
            '''movq''': movq,
        }
        return components

    def UpperCamelCase ( self , A__ , A__=0 ) -> Tuple:
        # Deterministic dummy inputs (embeds, init image, hint, generator).
        snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            A__ )
        # create init_image
        snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        snake_case = Image.fromarray(np.uinta(A__ ) ).convert('''RGB''' ).resize((2_56, 2_56) )
        # create hint
        snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(A__ ) ).to(A__ )
        if str(A__ ).startswith('''mps''' ):
            snake_case = torch.manual_seed(A__ )
        else:
            snake_case = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs

    def UpperCamelCase ( self ) -> int:
        # Run the pipeline on CPU and compare a 3x3 output slice to reference
        # values, for both dict and tuple return paths.
        snake_case = '''cpu'''
        snake_case = self.get_dummy_components()
        snake_case = self.pipeline_class(**A__ )
        snake_case = pipe.to(A__ )
        pipe.set_progress_bar_config(disable=A__ )
        snake_case = pipe(**self.get_dummy_inputs(A__ ) )
        snake_case = output.images
        snake_case = pipe(
            **self.get_dummy_inputs(A__ ) , return_dict=A__ , )[0]
        snake_case = image[0, -3:, -3:, -1]
        snake_case = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        snake_case = np.array(
            [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    """GPU integration test: Kandinsky 2.2 controlnet-depth img2img vs a reference image.

    NOTE(review): both methods share the mangled name ``UpperCamelCase`` and
    most locals were renamed to ``snake_case`` then read under their original
    names — confirm against the upstream diffusers test file.
    """

    def UpperCamelCase ( self ) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Load reference output, source image, and depth hint from the hub.
        snake_case = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        snake_case = init_image.resize((5_12, 5_12) )
        snake_case = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        # Normalise the hint to [0, 1] and add a batch dimension (NCHW).
        snake_case = torch.from_numpy(np.array(A__ ) ).float() / 2_5_5.0
        snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        snake_case = '''A robot, 4k photo'''
        snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa )
        pipe_prior.to(A__ )
        snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa )
        snake_case = pipeline.to(A__ )
        pipeline.set_progress_bar_config(disable=A__ )
        snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
        snake_case , snake_case = pipe_prior(
            A__ , image=A__ , strength=0.8_5 , generator=A__ , negative_prompt='''''' , ).to_tuple()
        snake_case = pipeline(
            image=A__ , image_embeds=A__ , negative_image_embeds=A__ , hint=A__ , generator=A__ , num_inference_steps=1_00 , height=5_12 , width=5_12 , strength=0.5 , output_type='''np''' , )
        snake_case = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(A__ , A__ )
| 44
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# NOTE(review): both the module logger and the scheduler map below are bound
# to `_lowercase`, while the trainer class reads `logger` and
# `arg_to_scheduler` — names look mangled; verify.
_lowercase = logging.get_logger(__name__)
# Maps the --lr_scheduler CLI choice to the corresponding transformers
# schedule factory.
_lowercase = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _lowercase ( __a ):
    """Seq2Seq trainer specialisation (optimizer/scheduler setup, label-smoothed
    loss, generate-based prediction step).

    NOTE(review): local/attribute assignments throughout were mangled to
    ``snake_case`` and parameters to ``A__`` while the bodies read the original
    names (``config``, ``optimizer_grouped_parameters``, ``logits``, ...);
    method names were mangled to ``UpperCamelCase`` as well. Confirm against
    the upstream transformers seq2seq trainer before relying on this class.
    """

    def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]:
        # Parameters look like (config=None, data_args=None, *args, **kwargs).
        super().__init__(*A__ , **A__ )
        if config is None:
            assert isinstance(self.model , A__ ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F""" {self.model.__class__}"""
            )
            snake_case = self.model.config
        else:
            snake_case = config
        snake_case = data_args
        snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding..''' )
        if self.args.label_smoothing == 0:
            snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            snake_case = label_smoothed_nll_loss

    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Create the optimizer (Adafactor or AdamW, no weight decay on
        # biases/LayerNorm) and the LR scheduler if not already set.
        if self.optimizer is None:
            snake_case = ['''bias''', '''LayerNorm.weight''']
            snake_case = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            snake_case = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                snake_case = Adafactor
                snake_case = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                snake_case = AdamW
                snake_case = {
                    '''betas''': (self.args.adam_betaa, self.args.adam_betaa),
                    '''eps''': self.args.adam_epsilon,
                }
            snake_case = self.args.learning_rate
            if self.sharded_ddp:
                # Wrap in fairscale's OSS for sharded data parallelism.
                snake_case = OSS(
                    params=A__ , optim=A__ , **A__ , )
            else:
                snake_case = optimizer_cls(A__ , **A__ )
        if self.lr_scheduler is None:
            snake_case = self._get_lr_scheduler(A__ )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )

    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Resolve the schedule factory from arg_to_scheduler and build it.
        snake_case = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            snake_case = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            snake_case = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ )
        return scheduler

    def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]:
        # Pick the train sampler: none for iterable datasets, TPU sampler on
        # TPU, optional sortish sampler, else random/distributed.
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
        # Compute (loss, logits): plain CE ignoring pad, model-internal loss,
        # or label-smoothed NLL depending on args.
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                snake_case = model(**A__ , use_cache=A__ )[0]
                snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2]
        else:
            # compute label smoothed loss
            snake_case = model(**A__ , use_cache=A__ )[0]
            snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 )
            snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits

    def UpperCamelCase ( self , A__ , A__ ) -> Any:
        # Trainer hook: pop labels and return the training loss.
        snake_case = inputs.pop('''labels''' )
        snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        return loss

    def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        # Prediction step: optionally generate, pad generations/labels to
        # max_length, and compute the eval loss.
        snake_case = self._prepare_inputs(A__ )
        snake_case = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            snake_case = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        snake_case = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        snake_case = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        snake_case = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)

    def UpperCamelCase ( self , A__ , A__ ) -> List[str]:
        # Right-pad a tensor to max_length with the pad (or EOS) token id.
        # If PAD token is not defined at least EOS token has to be defined
        snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                F""" padded to `max_length`={max_length}""" )
        snake_case = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        snake_case = tensor
        return padded_tensor
| 44
| 1
|
'''simple docstring'''
import numpy
class _lowercase :
    """Feed-forward neural network with two hidden layers (4 and 3 sigmoid
    nodes) and a single sigmoid output node, trained by backpropagation.

    Fixes the original class, in which every attribute assignment had been
    mangled to a bare ``snake_case`` local (so ``self.input_array`` etc. were
    never bound), ``train`` declared three duplicate ``A__`` parameters (a
    SyntaxError), and the methods called ``self.feedforward`` /
    ``self.back_propagation`` even though all methods were named
    ``UpperCamelCase`` — the method names below are restored to the names the
    code itself calls.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        self.input_array = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # self.input_array.shape[1] is the number of input-layer nodes;
        # the first hidden layer has 4 nodes.
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4)
        # First hidden layer (4 nodes) -> second hidden layer (3 nodes).
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # Second hidden layer (3 nodes) -> output layer (1 node).
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values; initially all zeroes.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate `input_array` through both hidden layers; return the output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights))
        # Activations of the second hidden layer.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        # Output-node activation.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """One gradient step: update all three weight matrices from the error
        ``output_array - predicted_output`` (requires a prior feedforward)."""
        error_grad = 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(
            self.predicted_output)
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T, error_grad)
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(error_grad, self.second_hidden_layer_and_output_layer_weights.T)
            * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(error_grad, self.second_hidden_layer_and_output_layer_weights.T)
                * sigmoid_derivative(self.layer_between_first_hidden_layer_and_second_hidden_layer),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run `iterations` feedforward/backprop cycles; optionally print MSE loss."""
        for iteration in range(1, iterations + 1):
            # back_propagation reads self.predicted_output, so refresh it first.
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"""Iteration {iteration} Loss: {loss}""")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify `input_arr`: 1 if the output activation exceeds 0.6, else 0."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights))
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            ))
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            ))
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


# Bind the name used by example() below.
TwoHiddenLayerNeuralNetwork = _lowercase
def __UpperCamelCase(value: numpy.ndarray) -> numpy.ndarray:
    """Elementwise logistic sigmoid: 1 / (1 + e^-x).

    Fixes the original, whose parameter was named ``a`` while the body read
    ``value`` (NameError).
    """
    return 1 / (1 + numpy.exp(-value))


# Bind the name the neural-network class above actually calls.
sigmoid = __UpperCamelCase
def __UpperCamelCase(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid expressed in terms of its output: s * (1 - s).

    Fixes the original, whose parameter was named ``a`` while the body read
    ``value`` (NameError).
    """
    return value * (1 - value)


# Bind the name the neural-network class above actually calls.
sigmoid_derivative = __UpperCamelCase
def __UpperCamelCase() -> int:
    """Train the network on 3-bit odd-parity data and predict for (1, 1, 1).

    Fixes the original, which used the nonexistent ``numpy.floataa`` dtype and
    passed the undefined name ``a`` as both constructor and ``train`` keyword
    arguments.
    """
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    # Calling neural network class.
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output)
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


# Bind the name the __main__ guard calls.
example = __UpperCamelCase


if __name__ == "__main__":
    example()
| 44
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase(a: List[str]) -> str:
    """Hash source lines after stripping comments and blank lines.

    Args:
        a: the source code of a loader module, split into lines.

    Returns:
        Hex digest of the filtered source — used as a cache key so a loader
        is re-imported only when its meaningful code changes.
    """
    # Local import: the module-level `shaaaa` import is a mangled `sha256`
    # reference and does not exist in hashlib.
    import hashlib

    filtered_lines = []
    # Fix: the original iterated the undefined name `lines` and appended to
    # undefined `filtered_lines`/`full_str`; bindings restored.
    for line in a:
        line = re.sub(r'#.*', '', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '\n'.join(filtered_lines)
    # Make a hash from all this code
    return hashlib.sha256(full_str.encode('utf-8')).hexdigest()
# get importable module names and hash for caching
# NOTE(review): in this mangled copy the registries below are all bound to the
# same name `_lowercase`, while later statements reference `_EXTENSION_TO_MODULE`
# / `_MODULE_TO_EXTENSIONS` and `_hash_python_lines` — confirm the original
# identifier names before running.
_lowercase = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
# Media extensions for the folder-based loaders are registered dynamically,
# in both lower- and upper-case form.
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# The folder-based loaders additionally accept .zip archives.
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44
| 1
|
'''simple docstring'''
import unittest
from transformers import DonutProcessor
_lowercase = 'naver-clova-ix/donut-base'
class _lowercase ( unittest.TestCase ):
    # Tests DonutProcessor's tag-sequence -> nested-dict conversion.
    # NOTE(review): identifiers are mangled — `A__` below is undefined at this
    # scope (presumably the checkpoint constant above), and `snake_case`
    # targets hide the original attribute names. Confirm before running.
    def UpperCamelCase ( self ) -> Dict:
        # setUp: load the pretrained Donut processor once per test.
        snake_case = DonutProcessor.from_pretrained(A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Expected parsed structure for the tagged sequence below.
        snake_case = {
            '''name''': '''John Doe''',
            '''age''': '''99''',
            '''city''': '''Atlanta''',
            '''state''': '''GA''',
            '''zip''': '''30301''',
            '''phone''': '''123-4567''',
            '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}],
        }
        # Donut-style tagged string; repeated tags inside <s_nicknames> become a list.
        snake_case = (
            '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'''
            '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'''
            '''<s_nicknames><s_nickname>Johnny</s_nickname>'''
            '''<sep/><s_nickname>JD</s_nickname></s_nicknames>'''
        )
        snake_case = self.processor.tokenajson(A__ )
        self.assertDictEqual(A__ , A__ )
| 44
|
'''simple docstring'''
_lowercase = {
'Pillow': 'Pillow',
'accelerate': 'accelerate>=0.11.0',
'compel': 'compel==0.1.8',
'black': 'black~=23.1',
'datasets': 'datasets',
'filelock': 'filelock',
'flax': 'flax>=0.4.1',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.13.2',
'requests-mock': 'requests-mock==1.10.0',
'importlib_metadata': 'importlib_metadata',
'invisible-watermark': 'invisible-watermark',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2',
'jaxlib': 'jaxlib>=0.1.65',
'Jinja2': 'Jinja2',
'k-diffusion': 'k-diffusion>=0.0.12',
'torchsde': 'torchsde',
'note_seq': 'note_seq',
'librosa': 'librosa',
'numpy': 'numpy',
'omegaconf': 'omegaconf',
'parameterized': 'parameterized',
'protobuf': 'protobuf>=3.20.3,<4',
'pytest': 'pytest',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'ruff': 'ruff>=0.0.241',
'safetensors': 'safetensors',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'scipy': 'scipy',
'onnx': 'onnx',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'tensorboard': 'tensorboard',
'torch': 'torch>=1.4',
'torchvision': 'torchvision',
'transformers': 'transformers>=4.25.1',
'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
'''simple docstring'''
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
# Module-level RNG shared by the input-generation helper below.
# NOTE(review): mangled name — the helper references `global_rng`, which this
# assignment was presumably named. Confirm.
_lowercase = random.Random()
def __UpperCamelCase(shape, scale=1.0, rng=None, name=None):
    """Create a 2-D list of random floats of the given ``shape``.

    Args:
        shape: (rows, cols) of the generated matrix.
        scale: multiplier applied to each random sample.
        rng: optional ``random.Random``; defaults to the module-level RNG.
        name: unused; kept for call-site compatibility.

    Returns:
        Nested Python lists of floats in [0, scale).
    """
    # Fix: the mangled original declared four duplicate parameters named `a`
    # (a SyntaxError) while the body read `shape`/`rng`/`scale`; the
    # parameter names are restored from those reads.
    if rng is None:
        # NOTE(review): `global_rng` is bound under a mangled name at module
        # level in this copy — confirm.
        rng = global_rng
    values = []
    for _batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class _lowercase ( unittest.TestCase ):
    # Helper holding hyper-parameters and building inputs for the Whisper
    # feature-extractor tests below.
    # NOTE(review): the __init__ signature is mangled (duplicate `A__`
    # parameters — a SyntaxError as written); each `snake_case = ...` line
    # presumably stored the like-named parameter on `self`. Confirm against
    # the upstream test file before running.
    def __init__( self , A__ , A__=7 , A__=4_00 , A__=20_00 , A__=10 , A__=1_60 , A__=8 , A__=0.0 , A__=40_00 , A__=False , A__=True , ) -> Union[str, Any]:
        snake_case = parent
        snake_case = batch_size
        snake_case = min_seq_length
        snake_case = max_seq_length
        # Per-sample length increment so batched inputs increase in size.
        snake_case = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        snake_case = padding_value
        snake_case = sampling_rate
        snake_case = return_attention_mask
        snake_case = do_normalize
        snake_case = feature_size
        snake_case = chunk_length
        snake_case = hop_length
    def UpperCamelCase ( self ) -> Tuple:
        # Kwargs dict for constructing a WhisperFeatureExtractor.
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def UpperCamelCase ( self , A__=False , A__=False ) -> List[Any]:
        # Build a batch of float "speech" inputs; equal_length makes all
        # samples max_seq_length long, numpify converts lists to ndarrays.
        def _flatten(A__ ):
            return list(itertools.chain(*A__ ) )
        if equal_length:
            snake_case = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            snake_case = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            snake_case = [np.asarray(A__ ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class _lowercase ( __a , unittest.TestCase ):
    # Test suite for WhisperFeatureExtractor (save/load round-trips, padding,
    # truncation, dtype handling, and an integration check on real audio).
    # NOTE(review): identifiers are mangled throughout — `snake_case` targets
    # and `A__` arguments hide the original variable names; the code as shown
    # is not runnable. Comments below describe the evident intent only.
    _UpperCAmelCase = WhisperFeatureExtractor if is_speech_available() else None
    def UpperCamelCase ( self ) -> Optional[Any]:
        # setUp: create the hyper-parameter helper.
        snake_case = WhisperFeatureExtractionTester(self )
    def UpperCamelCase ( self ) -> Any:
        # Round-trip via save_pretrained/from_pretrained preserves config
        # (including the mel filterbank).
        snake_case = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = feat_extract_first.save_pretrained(A__ )[0]
            check_json_file_has_correct_format(A__ )
            snake_case = self.feature_extraction_class.from_pretrained(A__ )
        snake_case = feat_extract_first.to_dict()
        snake_case = feat_extract_second.to_dict()
        snake_case = feat_extract_first.mel_filters
        snake_case = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(A__ , A__ ) )
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> str:
        # Round-trip via to_json_file/from_json_file preserves config.
        snake_case = self.feature_extraction_class(**self.feat_extract_dict )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = os.path.join(A__ , '''feat_extract.json''' )
            feat_extract_first.to_json_file(A__ )
            snake_case = self.feature_extraction_class.from_json_file(A__ )
        snake_case = feat_extract_first.to_dict()
        snake_case = feat_extract_second.to_dict()
        snake_case = feat_extract_first.mel_filters
        snake_case = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(A__ , A__ ) )
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> List[str]:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        # create three inputs of length 800, 1000, and 1200
        snake_case = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )]
        snake_case = [np.asarray(A__ ) for speech_input in speech_inputs]
        # Test feature size
        snake_case = feature_extractor(A__ , padding='''max_length''' , return_tensors='''np''' ).input_features
        self.assertTrue(input_features.ndim == 3 )
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
        # Test not batched input
        snake_case = feature_extractor(speech_inputs[0] , return_tensors='''np''' ).input_features
        snake_case = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' ).input_features
        self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test batched
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test 2-D numpy arrays are batched.
        snake_case = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)]
        snake_case = np.asarray(A__ )
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
        # Test truncation required
        snake_case = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )]
        snake_case = [np.asarray(A__ ) for speech_input in speech_inputs]
        snake_case = [x[: feature_extractor.n_samples] for x in speech_inputs]
        snake_case = [np.asarray(A__ ) for speech_input in speech_inputs_truncated]
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        snake_case = feature_extractor(A__ , return_tensors='''np''' ).input_features
        for enc_seq_a, enc_seq_a in zip(A__ , A__ ):
            self.assertTrue(np.allclose(A__ , A__ , atol=1e-3 ) )
    def UpperCamelCase ( self ) -> List[Any]:
        # double-precision inputs should come out float32 for both np and pt.
        import torch
        snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case = np.random.rand(1_00 , 32 ).astype(np.floataa )
        snake_case = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.floataa )
            snake_case = feature_extractor.pad([{'''input_features''': inputs}] , return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        # Load `num_samples` raw waveforms from the dummy LibriSpeech dataset.
        snake_case = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
        # automatic decoding with librispeech
        snake_case = ds.sort('''id''' ).select(range(A__ ) )[:num_samples]['''audio''']
        return [x["array"] for x in speech_samples]
    def UpperCamelCase ( self ) -> List[str]:
        # Integration test: feature values on a real sample match a golden slice.
        # fmt: off
        snake_case = torch.tensor(
            [
                0.1_1_9_3, -0.0_9_4_6, -0.1_0_9_8, -0.0_1_9_6, 0.0_2_2_5, -0.0_6_9_0, -0.1_7_3_6, 0.0_9_5_1,
                0.0_9_7_1, -0.0_8_1_7, -0.0_7_0_2, 0.0_1_6_2, 0.0_2_6_0, 0.0_0_1_7, -0.0_1_9_2, -0.1_6_7_8,
                0.0_7_0_9, -0.1_8_6_7, -0.0_6_5_5, -0.0_2_7_4, -0.0_2_3_4, -0.1_8_8_4, -0.0_5_1_6, -0.0_5_5_4,
                -0.0_2_7_4, -0.1_4_2_5, -0.1_4_2_3, 0.0_8_3_7, 0.0_3_7_7, -0.0_8_5_4
            ] )
        # fmt: on
        snake_case = self._load_datasamples(1 )
        snake_case = WhisperFeatureExtractor()
        snake_case = feature_extractor(A__ , return_tensors='''pt''' ).input_features
        self.assertEqual(input_features.shape , (1, 80, 30_00) )
        self.assertTrue(torch.allclose(input_features[0, 0, :30] , A__ , atol=1e-4 ) )
    def UpperCamelCase ( self ) -> Optional[Any]:
        # zero_mean_unit_var_norm should produce ~0 mean and ~unit variance.
        snake_case = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        snake_case = self._load_datasamples(1 )[0]
        snake_case = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35  # Rescale to [0, 65535] to show issue
        snake_case = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A__ )[0]
        self.assertTrue(np.all(np.mean(A__ ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(A__ ) - 1 ) < 1e-3 ) )
| 44
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( __a , __a , unittest.TestCase ):
    # Fast tests for IFInpaintingSuperResolutionPipeline via the shared
    # PipelineTesterMixin/IFPipelineTesterMixin machinery.
    _UpperCAmelCase = IFInpaintingSuperResolutionPipeline
    # width/height are fixed by the superresolution pipeline, so excluded.
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def UpperCamelCase ( self ) -> int:
        # Dummy components come from the shared IF pipeline test mixin.
        return self._get_superresolution_dummy_components()
    def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]:
        # Build deterministic dummy inputs (seeded generator, 16x16 image,
        # 32x32 original image and mask).
        if str(A__ ).startswith('''mps''' ):
            # mps does not support device-specific generators.
            snake_case = torch.manual_seed(A__ )
        else:
            snake_case = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase ( self ) -> List[Any]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def UpperCamelCase ( self ) -> List[str]:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def UpperCamelCase ( self ) -> int:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_local()
    def UpperCamelCase ( self ) -> Dict:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
# NOTE(review): the list was mangled to `_lowercase`, but every append below
# targets `rename_keys` — confirm the intended variable name before running.
_lowercase = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.weight', f'encoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.self_attn.out_proj.bias', f'encoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'encoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'encoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'encoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'encoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.encoder.layers.{i}.norm1.weight', f'encoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'encoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'encoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'encoder.layers.{i}.final_layer_norm.bias'))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'decoder.layers.{i}.self_attn.out_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'decoder.layers.{i}.self_attn.out_proj.bias')
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.cross_attn.out_proj.weight',
            f'decoder.layers.{i}.encoder_attn.out_proj.weight',
        )
    )
    rename_keys.append(
        (
            f'transformer.decoder.layers.{i}.cross_attn.out_proj.bias',
            f'decoder.layers.{i}.encoder_attn.out_proj.bias',
        )
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'decoder.layers.{i}.fc1.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'decoder.layers.{i}.fc1.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'decoder.layers.{i}.fc2.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'decoder.layers.{i}.fc2.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm1.weight', f'decoder.layers.{i}.self_attn_layer_norm.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'decoder.layers.{i}.self_attn_layer_norm.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.weight', f'decoder.layers.{i}.encoder_attn_layer_norm.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.norm2.bias', f'decoder.layers.{i}.encoder_attn_layer_norm.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'decoder.layers.{i}.final_layer_norm.weight'))
    rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'decoder.layers.{i}.final_layer_norm.bias'))
    # q, k, v projections in self/cross-attention in decoder for conditional DETR
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_qcontent_proj.weight', f'decoder.layers.{i}.sa_qcontent_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_kcontent_proj.weight', f'decoder.layers.{i}.sa_kcontent_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_qpos_proj.weight', f'decoder.layers.{i}.sa_qpos_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_kpos_proj.weight', f'decoder.layers.{i}.sa_kpos_proj.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.weight', f'decoder.layers.{i}.sa_v_proj.weight'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_qcontent_proj.weight', f'decoder.layers.{i}.ca_qcontent_proj.weight')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_kcontent_proj.weight', f'decoder.layers.{i}.ca_kcontent_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_kpos_proj.weight', f'decoder.layers.{i}.ca_kpos_proj.weight')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.weight', f'decoder.layers.{i}.ca_v_proj.weight'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight', f'decoder.layers.{i}.ca_qpos_sine_proj.weight')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_qcontent_proj.bias', f'decoder.layers.{i}.sa_qcontent_proj.bias')
    )
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.sa_kcontent_proj.bias', f'decoder.layers.{i}.sa_kcontent_proj.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_qpos_proj.bias', f'decoder.layers.{i}.sa_qpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_kpos_proj.bias', f'decoder.layers.{i}.sa_kpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.sa_v_proj.bias', f'decoder.layers.{i}.sa_v_proj.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_qcontent_proj.bias', f'decoder.layers.{i}.ca_qcontent_proj.bias')
    )
    # rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_kcontent_proj.bias', f'decoder.layers.{i}.ca_kcontent_proj.bias')
    )
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_kpos_proj.bias', f'decoder.layers.{i}.ca_kpos_proj.bias'))
    rename_keys.append((f'transformer.decoder.layers.{i}.ca_v_proj.bias', f'decoder.layers.{i}.ca_v_proj.bias'))
    rename_keys.append(
        (f'transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias', f'decoder.layers.{i}.ca_qpos_sine_proj.bias')
    )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
    [
        ('input_proj.weight', 'input_projection.weight'),
        ('input_proj.bias', 'input_projection.bias'),
        ('query_embed.weight', 'query_position_embeddings.weight'),
        ('transformer.decoder.norm.weight', 'decoder.layernorm.weight'),
        ('transformer.decoder.norm.bias', 'decoder.layernorm.bias'),
        ('class_embed.weight', 'class_labels_classifier.weight'),
        ('class_embed.bias', 'class_labels_classifier.bias'),
        ('bbox_embed.layers.0.weight', 'bbox_predictor.layers.0.weight'),
        ('bbox_embed.layers.0.bias', 'bbox_predictor.layers.0.bias'),
        ('bbox_embed.layers.1.weight', 'bbox_predictor.layers.1.weight'),
        ('bbox_embed.layers.1.bias', 'bbox_predictor.layers.1.bias'),
        ('bbox_embed.layers.2.weight', 'bbox_predictor.layers.2.weight'),
        ('bbox_embed.layers.2.bias', 'bbox_predictor.layers.2.bias'),
        ('transformer.decoder.ref_point_head.layers.0.weight', 'decoder.ref_point_head.layers.0.weight'),
        ('transformer.decoder.ref_point_head.layers.0.bias', 'decoder.ref_point_head.layers.0.bias'),
        ('transformer.decoder.ref_point_head.layers.1.weight', 'decoder.ref_point_head.layers.1.weight'),
        ('transformer.decoder.ref_point_head.layers.1.bias', 'decoder.ref_point_head.layers.1.bias'),
        ('transformer.decoder.query_scale.layers.0.weight', 'decoder.query_scale.layers.0.weight'),
        ('transformer.decoder.query_scale.layers.0.bias', 'decoder.query_scale.layers.0.bias'),
        ('transformer.decoder.query_scale.layers.1.weight', 'decoder.query_scale.layers.1.weight'),
        ('transformer.decoder.query_scale.layers.1.bias', 'decoder.query_scale.layers.1.bias'),
        ('transformer.decoder.layers.0.ca_qpos_proj.weight', 'decoder.layers.0.ca_qpos_proj.weight'),
        ('transformer.decoder.layers.0.ca_qpos_proj.bias', 'decoder.layers.0.ca_qpos_proj.bias'),
    ]
)
def __UpperCamelCase(state_dict, old, new):
    """Move the value stored under ``old`` to ``new`` in ``state_dict`` (in place).

    Args:
        state_dict: mutable mapping of checkpoint tensors.
        old: key to remove.
        new: key to store the popped value under.
    """
    # Fix: the mangled original declared three duplicate parameters named `a`
    # (a SyntaxError) and lost the assignment target for the popped value.
    val = state_dict.pop(old)
    state_dict[new] = val
def __UpperCamelCase(a):
    """Return a copy of state dict ``a`` with backbone keys renamed.

    Keys containing ``backbone.0.body`` are rewritten to the HF
    ``backbone.conv_encoder.model`` convention; all other keys pass through.
    """
    # Fix: the mangled original assigned into `snake_case` instead of the new
    # OrderedDict; bindings restored per the surrounding reads.
    new_state_dict = OrderedDict()
    for key, value in a.items():
        if "backbone.0.body" in key:
            key = key.replace('''backbone.0.body''', '''backbone.conv_encoder.model''')
        new_state_dict[key] = value
    return new_state_dict
def __UpperCamelCase ( a : Tuple , a : List[str]=False ) ->Optional[Any]:
    # Splits each encoder layer's fused in_proj weight/bias into separate
    # q/k/v projections of 256 channels each.
    # NOTE(review): the destination keys of the slice assignments were lost in
    # this mangled copy (all targets read `snake_case`); presumably they were
    # `state_dict[f"encoder.layers.{i}.self_attn.{q,k,v}_proj.{weight,bias}"]`
    # — confirm against the upstream conversion script.
    snake_case = ''''''
    if is_panoptic:
        snake_case = '''conditional_detr.'''
    # first: transformer encoder
    for i in range(6 ):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
        snake_case = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
        # next, add query, keys and values (in that order) to the state dict
        snake_case = in_proj_weight[:256, :]
        snake_case = in_proj_bias[:256]
        snake_case = in_proj_weight[256:512, :]
        snake_case = in_proj_bias[256:512]
        snake_case = in_proj_weight[-256:, :]
        snake_case = in_proj_bias[-256:]
def __UpperCamelCase():
    """Download and return the standard COCO "two cats" verification image."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Fix: `stream=a` referenced an undefined name — streaming must be enabled
    # so `Response.raw` yields the body for PIL to decode.
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def __UpperCamelCase ( a : int , a : Any ) ->Tuple:
    # Converts an original ConditionalDETR checkpoint (torch hub) into a HF
    # ConditionalDetrFor{ObjectDetection,Segmentation} model, verifies the
    # outputs match, pushes to the hub, and saves locally.
    # NOTE(review): heavily mangled — duplicate `a` parameters, `snake_case`
    # assignment targets and `A__`-style argument placeholders mean the code
    # as shown is not runnable; comments describe the evident intent only.
    snake_case = ConditionalDetrConfig()
    # set backbone and dilation attributes
    if "resnet101" in model_name:
        snake_case = '''resnet101'''
    if "dc5" in model_name:
        snake_case = True
    snake_case = '''panoptic''' in model_name
    if is_panoptic:
        # Panoptic checkpoints use 250 classes; detection uses COCO's 91.
        snake_case = 250
    else:
        snake_case = 91
        snake_case = '''huggingface/label-files'''
        snake_case = '''coco-detection-id2label.json'''
        snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
        snake_case = {int(a ): v for k, v in idalabel.items()}
        snake_case = idalabel
        snake_case = {v: k for k, v in idalabel.items()}
    # load image processor
    snake_case = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    snake_case = ConditionalDetrImageProcessor(format=a )
    # prepare image
    snake_case = prepare_img()
    snake_case = image_processor(images=a , return_tensors='''pt''' )
    snake_case = encoding['''pixel_values''']
    logger.info(f"""Converting model {model_name}...""" )
    # load original model from torch hub
    snake_case = torch.hub.load('''DeppMeng/ConditionalDETR''' , a , pretrained=a ).eval()
    snake_case = conditional_detr.state_dict()
    # rename keys
    for src, dest in rename_keys:
        if is_panoptic:
            snake_case = '''conditional_detr.''' + src
        rename_key(a , a , a )
    snake_case = rename_backbone_keys(a )
    # query, key and value matrices need special treatment
    read_in_q_k_v(a , is_panoptic=a )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    snake_case = '''conditional_detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''conditional_detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                snake_case = state_dict.pop(a )
                snake_case = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                snake_case = state_dict.pop(a )
                snake_case = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                snake_case = state_dict.pop(a )
                snake_case = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                snake_case = state_dict.pop(a )
                snake_case = val
    # finally, create HuggingFace model and load state dict
    snake_case = ConditionalDetrForSegmentation(a ) if is_panoptic else ConditionalDetrForObjectDetection(a )
    model.load_state_dict(a )
    model.eval()
    model.push_to_hub(repo_id=a , organization='''DepuMeng''' , commit_message='''Add model''' )
    # verify our conversion
    snake_case = conditional_detr(a )
    snake_case = model(a )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-4 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-4 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    # Save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point: parse the checkpoint name and output folder, then run
    # the conversion defined above.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='conditional_detr_resnet50',
        type=str,
        help='Name of the CONDITIONAL_DETR model you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    # NOTE(review): mangled names — `parser`, `args` and
    # `convert_conditional_detr_checkpoint` are not bound under those names in
    # this copy; confirm before running.
    _lowercase = parser.parse_args()
    convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 44
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
    def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]:
        # Stores the core audio feature-extractor attributes: feature_size,
        # sampling_rate, padding_value, plus optional padding_side (default
        # "right") and return_attention_mask pulled from kwargs.
        # NOTE(review): the signature is mangled (duplicate `A__` parameters —
        # a SyntaxError as written); each `snake_case = ...` line presumably
        # stored the like-named parameter on `self`. Confirm upstream.
        snake_case = feature_size
        snake_case = sampling_rate
        snake_case = padding_value
        snake_case = kwargs.pop('''padding_side''' , '''right''' )
        snake_case = kwargs.pop('''return_attention_mask''' , A__ )
        super().__init__(**A__ )
    def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature:
        # Pads (and optionally truncates) a batch of processed features to a
        # common length, building an attention mask when requested, and
        # returns a BatchFeature in the requested tensor type.
        # NOTE(review): mangled — `snake_case` targets and `A__` arguments hide
        # the original bindings; comments describe the evident intent only.
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            snake_case = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F""" to this method that includes {self.model_input_names[0]}, but you provided"""
                F""" {list(processed_features.keys() )}""" )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(A__ ) == 0:
            # Empty batch: nothing to pad; attach an empty attention mask if asked.
            if return_attention_mask:
                snake_case = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        snake_case = required_input[0]
        if isinstance(A__ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            snake_case = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(A__ ):
                snake_case = required_input[index][0]
        if return_tensors is None:
            # Infer the return tensor type from the first element's framework.
            if is_tf_tensor(A__ ):
                snake_case = '''tf'''
            elif is_torch_tensor(A__ ):
                snake_case = '''pt'''
            elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ):
                snake_case = '''np'''
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(A__ )}. """
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                snake_case = to_numpy(A__ )
            else:
                snake_case = [to_numpy(A__ ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = len(A__ )
        if not all(len(A__ ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        # First pass: per-example truncation.
        snake_case = []
        for i in range(A__ ):
            snake_case = {k: v[i] for k, v in processed_features.items()}
            # truncation
            snake_case = self._truncate(
                A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , )
            truncated_inputs.append(A__ )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            snake_case = PaddingStrategy.MAX_LENGTH
        # Second pass: per-example padding, collected column-wise.
        snake_case = {}
        for i in range(A__ ):
            # padding
            snake_case = self._pad(
                truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    snake_case = []
                if value.dtype is np.dtype(np.floataa ):
                    # Keep outputs float32 even when inputs arrived as float64.
                    snake_case = value.astype(np.floataa )
                batch_outputs[key].append(A__ )
        return BatchFeature(A__ , tensor_type=A__ )
def _pad(
    self,
    processed_features,
    max_length=None,
    padding_strategy=PaddingStrategy.DO_NOT_PAD,
    pad_to_multiple_of=None,
    return_attention_mask=None,
) -> dict:
    """Pad a single example's main input (and attention mask) up to ``max_length``.

    Args:
        processed_features: dict holding at least ``self.model_input_names[0]``;
            mutated in place and returned.
        max_length: target length; recomputed for LONGEST / multiple-of rounding.
        padding_strategy: a ``PaddingStrategy`` member controlling whether/how to pad.
        pad_to_multiple_of: if set, round ``max_length`` up to a multiple of this.
        return_attention_mask: when truthy, create/pad an ``attention_mask`` entry.

    Returns:
        The same ``processed_features`` dict with padded entries.

    Raises:
        ValueError: if ``self.padding_side`` is neither "right" nor "left".
    """
    # NOTE(review): original parameter names were obfuscated to duplicate `A__`
    # (a SyntaxError); names restored from the body's references. Method name
    # restored to `_pad` to match the `self._pad(...)` call site earlier in the class.
    required_input = processed_features[self.model_input_names[0]]
    if padding_strategy == PaddingStrategy.LONGEST:
        # "Longest" for a single example is simply its own length (no padding needed).
        max_length = len(required_input)
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
    if return_attention_mask and "attention_mask" not in processed_features:
        # np.intaa in the corrupted source; upstream uses int32 for attention masks.
        processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
    if needs_to_be_padded:
        difference = max_length - len(required_input)
        if self.padding_side == "right":
            if return_attention_mask:
                processed_features["attention_mask"] = np.pad(
                    processed_features["attention_mask"], (0, difference)
                )
            # 2-D features pad only along the time axis; 1-D features pad directly.
            padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
            processed_features[self.model_input_names[0]] = np.pad(
                required_input, padding_shape, "constant", constant_values=self.padding_value
            )
        elif self.padding_side == "left":
            if return_attention_mask:
                processed_features["attention_mask"] = np.pad(
                    processed_features["attention_mask"], (difference, 0)
                )
            padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
            processed_features[self.model_input_names[0]] = np.pad(
                required_input, padding_shape, "constant", constant_values=self.padding_value
            )
        else:
            raise ValueError("Invalid padding strategy:" + str(self.padding_side))
    return processed_features
def _truncate(
    self,
    processed_features,
    max_length=None,
    pad_to_multiple_of=None,
    truncation=None,
):
    """Truncate a single example's inputs (and attention mask) down to ``max_length``.

    Args:
        processed_features: dict holding at least ``self.model_input_names[0]``;
            mutated in place and returned.
        max_length: maximum allowed length; required when ``truncation`` is truthy.
        pad_to_multiple_of: if set, round ``max_length`` up to a multiple of this
            before comparing lengths.
        truncation: falsy means no-op; truthy enables truncation.

    Returns:
        The same ``processed_features`` dict, possibly truncated.

    Raises:
        ValueError: if truncation is requested without ``max_length``.
    """
    # NOTE(review): original parameters were obfuscated to duplicate `A__`
    # (a SyntaxError); names restored from body references, method name restored
    # to `_truncate` to match the `self._truncate(...)` call site above.
    if not truncation:
        return processed_features
    elif truncation and max_length is None:
        raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
    required_input = processed_features[self.model_input_names[0]]
    # find `max_length` that fits `pad_to_multiple_of`
    if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
        max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
    needs_to_be_truncated = len(required_input) > max_length
    if needs_to_be_truncated:
        processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
        if "attention_mask" in processed_features:
            processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
    return processed_features
def _get_padding_strategies(self, padding=False, max_length=None):
    """Resolve a user-supplied ``padding`` argument into a ``PaddingStrategy``.

    Args:
        padding: ``True`` (pad to longest), ``False`` (no padding), a strategy
            name/string, or a ``PaddingStrategy`` member.
        max_length: required when the resolved strategy is MAX_LENGTH.

    Returns:
        The resolved ``PaddingStrategy``.

    Raises:
        ValueError: if MAX_LENGTH is requested without ``max_length``, or if
            padding is requested but ``self.padding_value`` is unset.
    """
    # NOTE(review): original parameters were obfuscated to duplicate `A__`;
    # names restored from body references, method name restored to
    # `_get_padding_strategies` to match the `self._get_padding_strategies(...)` call site.
    if padding is not False:
        if padding is True:
            padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
        elif not isinstance(padding, PaddingStrategy):
            # Accept plain strings such as "max_length" and coerce them.
            padding_strategy = PaddingStrategy(padding)
        elif isinstance(padding, PaddingStrategy):
            padding_strategy = padding
    else:
        padding_strategy = PaddingStrategy.DO_NOT_PAD
    # Set max length if needed
    if max_length is None:
        if padding_strategy == PaddingStrategy.MAX_LENGTH:
            raise ValueError(
                f"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"""
            )
    # Test if we have a padding value
    if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
        raise ValueError(
            "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
            " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
        )
    return padding_strategy
| 44
| 1
|
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    """Return the float sum of all parameters in ``state_dict``.

    Keys containing ``encoder.embeddings`` contribute 0 because those weights are
    double-copied in the original FLAVA checkpoint (see comment below).

    NOTE(review): the obfuscated source named the parameter ``a`` while the body
    read ``state_dict`` (a NameError), and the def name did not match the
    ``count_parameters`` call sites below — both restored here.
    """
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    """Map original FLAVA checkpoint keys to Hugging Face FLAVA model keys.

    Args:
        state_dict: original FLAVA checkpoint tensors.
        codebook_state_dict: converted DALL-E codebook tensors; copied under the
            ``image_codebook.`` prefix.

    Returns:
        A new dict with renamed keys; model weights are cast to float.

    NOTE(review): the obfuscated source had duplicate ``a, a`` parameters
    (a SyntaxError) and collapsed the accumulator/key rebinding into repeated
    ``snake_case =`` assignments; reconstructed from the upstream FLAVA
    conversion script — verify the ``image_codebook.`` prefix against it.
    """
    upgrade = {}
    for key, value in state_dict.items():
        # embedding tables are double-copied in the original checkpoint; skip them
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue
        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")
        upgrade[key] = value.float()
    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value
    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Convert an original FLAVA checkpoint (plus DALL-E codebook) into a HF dump.

    Args:
        checkpoint_path: local path or URL of the original FLAVA checkpoint.
        codebook_path: path to the DALL-E codebook checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional HF ``config.json``; defaults to a fresh FlavaConfig.

    NOTE(review): the obfuscated source had duplicate parameters (a SyntaxError)
    and collapsed every argument to ``a``; reconstructed from the upstream
    conversion script — verify argument order against it.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()
    hf_model = FlavaForPreTraining(config).eval()
    # Convert the codebook in memory only (no intermediate checkpoint written).
    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)
    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")
    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)
    # Parameter-sum parity check between the source checkpoint and the converted model.
    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point for the FLAVA checkpoint converter.
    # NOTE(review): the obfuscated source assigned the parser and parsed args to
    # `_lowercase` while referencing `parser`/`args` (NameErrors); names restored.
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
    parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    args = parser.parse_args()
    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 44
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """A SafeLoader that rejects YAML mappings containing duplicate keys.

    NOTE(review): the obfuscated source named both methods ``UpperCamelCase``
    (the second shadowed the first) and named the class ``_lowercase`` even
    though it is used below as ``_NoDuplicateSafeLoader``; real names restored.
    ``construct_mapping`` must keep this exact name — PyYAML calls it.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError if the mapping ``node`` contains a duplicate key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples so Counter can count them.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"""Got duplicate yaml keys: {duplicate_keys}""")

    def construct_mapping(self, node, deep=False):
        """Build the mapping via SafeLoader, then validate key uniqueness."""
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into its leading ``---``-delimited YAML block and the rest.

    Args:
        readme_content: full text of the README file.

    Returns:
        ``(yaml_block, remainder)`` when the file starts with a ``---`` fence,
        otherwise ``(None, full_text)``.

    NOTE(review): the obfuscated source named the parameter ``a`` while the body
    read ``readme_content``/``full_content`` (NameErrors); the def name is
    restored to ``_split_yaml_from_readme`` to match the call sites below.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing fence (offset by 1 because we searched from line 1).
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
class DatasetMetadata(dict):
    """Dict-like container for a dataset card's YAML metadata block.

    NOTE(review): the obfuscated source named the class ``_lowercase`` with an
    unknown base ``__a`` and collapsed every method to ``UpperCamelCase``
    (mutual shadowing). Names restored from usage in this file; the ``dict``
    base matches ``self.items()`` / ``cls(**metadata_dict)`` below — confirm
    against the upstream `datasets` utility.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        """Load metadata from the YAML block at the top of the README at ``path``."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path) -> None:
        """Write this metadata back into the README at ``path`` (created if absent)."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None) -> str:
        """Prepend (or replace) the YAML fence in ``readme_content`` with this metadata."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into a DatasetMetadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize back to YAML, restoring dashes in the dashed field names."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
# Mapping of known task-category names to their (currently empty) lists of task
# templates, used when validating a dataset card's `task_categories` metadata.
# NOTE(review): the variable name `_lowercase` looks machine-renamed; the CLI
# below does not reference it directly, so the original name cannot be recovered
# from this file alone.
_lowercase = {
    'image-classification': [],
    'translation': [],
    'image-segmentation': [],
    'fill-mask': [],
    'automatic-speech-recognition': [],
    'token-classification': [],
    'sentence-similarity': [],
    'audio-classification': [],
    'question-answering': [],
    'summarization': [],
    'zero-shot-classification': [],
    'table-to-text': [],
    'feature-extraction': [],
    'other': [],
    'multiple-choice': [],
    'text-classification': [],
    'text-to-image': [],
    'text2text-generation': [],
    'zero-shot-image-classification': [],
    'tabular-classification': [],
    'tabular-regression': [],
    'image-to-image': [],
    'tabular-to-text': [],
    'unconditional-image-generation': [],
    'text-retrieval': [],
    'text-to-speech': [],
    'object-detection': [],
    'audio-to-audio': [],
    'text-generation': [],
    'conversational': [],
    'table-question-answering': [],
    'visual-question-answering': [],
    'image-to-text': [],
    'reinforcement-learning': [],
    'voice-activity-detection': [],
    'time-series-forecasting': [],
    'document-question-answering': [],
}
if __name__ == "__main__":
    # CLI: validate (round-trip) the YAML metadata block of a README.md file.
    # NOTE(review): the obfuscated source assigned the parser/args/paths to
    # `_lowercase` while referencing `ap`/`args` (NameErrors); names restored.
    from argparse import ArgumentParser

    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()
    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 44
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpta_checkpoint_to_pytorch(gpta_checkpoint_path, gpta_config_file, pytorch_dump_folder_path):
    """Convert a TensorFlow GPT-2 checkpoint to a PyTorch weights + config dump.

    Args:
        gpta_checkpoint_path: path to the TensorFlow checkpoint.
        gpta_config_file: optional JSON config path; "" uses the default config.
        pytorch_dump_folder_path: output directory for the weights and config.

    NOTE(review): the obfuscated source had duplicate ``a`` parameters
    (a SyntaxError); names restored from the body's references and the
    ``convert_gpta_checkpoint_to_pytorch`` call site at the bottom of the file.
    """
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file)
    model = GPTaModel(config)
    # Load weights from numpy
    load_tf_weights_in_gpta(model, config, gpta_checkpoint_path)
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"""Save configuration file to {pytorch_config_dump_path}""")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    # CLI entry point for the GPT-2 TF->PyTorch converter.
    # NOTE(review): the obfuscated source assigned the parser and parsed args to
    # `_lowercase` (NameErrors), and read `args.gpta_*` attributes that argparse
    # would never create for the `--gpt2_*` flags; both fixed.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--gpt2_config_file',
        default='',
        type=str,
        help=(
            'An optional config json file corresponding to the pre-trained OpenAI model. \n'
            'This specifies the model architecture.'
        ),
    )
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 44
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    # Tokenizer test-suite for the CodeGen slow/fast tokenizers.
    # NOTE(review): the five `_UpperCAmelCase` class attributes and the repeated
    # `UpperCamelCase` method names are machine-renamed; upstream these are
    # distinct attributes (tokenizer_class, rust_tokenizer_class, ...) and
    # distinct `setUp`/`get_*`/`test_*` methods, so here later definitions
    # shadow earlier ones — confirm against the original transformers test file.
    _UpperCAmelCase = CodeGenTokenizer
    _UpperCAmelCase = CodeGenTokenizerFast
    _UpperCAmelCase = True
    _UpperCAmelCase = {'''add_prefix_space''': True}
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> Tuple:
        # Writes a tiny BPE vocab + merges fixture to a temp dir for the tests below.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Build a slow tokenizer from the on-disk fixture (merging special tokens).
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Build a fast (Rust) tokenizer from the same fixture.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Provide matching (input, expected-output) text for round-trip checks.
        snake_case = '''lower newer'''
        snake_case = '''lower newer'''
        return input_text, output_text
    def UpperCamelCase ( self ) -> List[Any]:
        # Full tokenization check: tokens and their ids against the tiny fixture vocab.
        snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case = '''lower newer'''
        snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        self.assertListEqual(A__ , A__ )
        snake_case = tokens + [tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        # Parity check between the slow (Python) and fast (Rust) tokenizers.
        if not self.test_rust_tokenizer:
            return
        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = '''lower newer'''
        # Testing tokenization
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids without special tokens
        snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids with special tokens
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing the unknown token
        snake_case = tokens + [rust_tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def UpperCamelCase ( self , A__=15 ) -> Tuple:
        # Padding without a pad token must raise for every encode entry point.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                # Simple input
                snake_case = '''This is a simple input'''
                snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case = ('''This is a simple input''', '''This is a pair''')
                snake_case = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
    def UpperCamelCase ( self ) -> Tuple:
        # Padding behaviour (max_length and longest) once a pad token is configured.
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
        snake_case = ('''This is a simple input''', '''This is a pair''')
        snake_case = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        snake_case = tokenizer.pad_token_id
        snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def UpperCamelCase ( self ) -> str:
        # A custom BOS token must be prepended to every encoding and decoding.
        snake_case = '''$$$'''
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
        snake_case = tokenizer.bos_token_id
        snake_case = tokenizer(A__ )
        snake_case = tokenizer(A__ )
        self.assertEqual(out_s.input_ids[0] , A__ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        snake_case = tokenizer.decode(out_s.input_ids )
        snake_case = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A__ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def UpperCamelCase ( self ) -> Any:
        # Integration test: decode with truncate_before_pattern against a real checkpoint.
        snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        snake_case = '''\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'''
        snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
        snake_case = tokenizer.encode(A__ )
        snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally skipped upstream.
        pass
| 44
| 1
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    """Fixture that holds MobileViT image-processor settings for the tests below.

    NOTE(review): the obfuscated source had duplicate ``A__`` parameters
    (a SyntaxError) and a ``_lowercase``/``UpperCamelCase`` class/method name;
    names restored from the body and the ``MobileViTImageProcessingTester(self)``
    / ``prepare_image_processor_dict()`` call sites below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        # Fall back to the upstream defaults when size/crop_size are not given.
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        """Return the kwargs dict used to build a MobileViTImageProcessor."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }
@require_torch
@require_vision
class _lowercase ( __a , unittest.TestCase ):
    # Image-processing test-suite for MobileViTImageProcessor.
    # NOTE(review): every method below is named `UpperCamelCase` (machine-renamed);
    # upstream these are distinct `setUp`/`test_*` methods, so later definitions
    # shadow earlier ones here — confirm against the original transformers test file.
    _UpperCAmelCase = MobileViTImageProcessor if is_vision_available() else None
    def UpperCamelCase ( self ) -> str:
        # Build the shared settings fixture.
        snake_case = MobileViTImageProcessingTester(self )
    @property
    def UpperCamelCase ( self ) -> int:
        # Kwargs used to construct the image processor under test.
        return self.image_processor_tester.prepare_image_processor_dict()
    def UpperCamelCase ( self ) -> Dict:
        # The processor must expose all configuration attributes.
        snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(A__ , '''do_resize''' ) )
        self.assertTrue(hasattr(A__ , '''size''' ) )
        self.assertTrue(hasattr(A__ , '''do_center_crop''' ) )
        self.assertTrue(hasattr(A__ , '''center_crop''' ) )
        self.assertTrue(hasattr(A__ , '''do_flip_channel_order''' ) )
    def UpperCamelCase ( self ) -> Optional[Any]:
        # from_dict must honour both the stored values and keyword overrides.
        snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
        snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def UpperCamelCase ( self ) -> Any:
        # Intentionally skipped upstream.
        pass
    def UpperCamelCase ( self ) -> Dict:
        # PIL inputs: single image and batch must produce the cropped 4-D tensors.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , Image.Image )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase ( self ) -> Dict:
        # NumPy inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , numpify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , np.ndarray )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
    def UpperCamelCase ( self ) -> str:
        # Torch-tensor inputs: same shape expectations as the PIL case.
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=A__ , torchify=A__ )
        for image in image_inputs:
            self.assertIsInstance(A__ , torch.Tensor )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
        # Test batched
        snake_case = image_processing(A__ , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['''height'''],
                self.image_processor_tester.crop_size['''width'''],
            ) , )
| 44
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """Fixture that builds small TF-ViT configs/inputs and runs shape checks.

    NOTE(review): the obfuscated source had duplicate ``A__`` parameters in
    ``__init__`` (a SyntaxError) and every method named ``UpperCamelCase``;
    names restored from the body's attribute assignments and the call sites in
    the test class below (``prepare_config_and_inputs``, ``get_config``,
    ``create_and_check_model``, ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # NOTE(review): accepted but unused in the visible body — confirm upstream
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a single forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a small ViTConfig from the fixture settings."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check TFViTModel output shapes, incl. position-embedding interpolation."""
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check TFViTForImageClassification logits, incl. resized and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    # Model test-suite for TF ViT (TFViTModel / TFViTForImageClassification).
    # NOTE(review): the `_UpperCAmelCase` class attributes and the repeated
    # `UpperCamelCase` method names are machine-renamed; upstream these are
    # distinct attributes and `setUp`/`test_*` methods, so later definitions
    # shadow earlier ones here — confirm against the original transformers test file.
    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> List[Any]:
        # Build the shared model-tester and config-tester fixtures.
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def UpperCamelCase ( self ) -> int:
        # Run the shared config sanity checks.
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        pass
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Embedding layers must be Keras layers; output embeddings may be None.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
    def UpperCamelCase ( self ) -> List[Any]:
        # The forward signature must start with `pixel_values`.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Shape checks for the base model.
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def UpperCamelCase ( self ) -> Optional[Any]:
        # Shape checks for the classification head.
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    @slow
    def UpperCamelCase ( self ) -> Any:
        # The pretrained checkpoint must load successfully.
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
    """Load the standard COCO test image used by the integration tests.

    Returns the opened PIL image. The original body bound the opened image to
    a throwaway name and then returned the unbound name ``image`` (NameError
    on every call).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: classify the COCO cats image with pretrained TF ViT.

    NOTE(review): locals are mangled to ``snake_case`` while later lines read
    ``image_processor``, ``model``, ``outputs`` — verify against upstream.
    """
    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        # Image processor for the checkpoint, or None when vision deps are missing.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def UpperCamelCase ( self ) -> Dict:
        """End-to-end forward pass; checks logits shape and the first three values."""
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
# Lazy-import scaffolding for the BARTpho tokenizer package.
# The original code bound the structure dict and the export list to the
# throwaway name `_lowercase`, then passed the undefined `_import_structure`
# to _LazyModule (NameError at import time) and never installed the lazy
# module in sys.modules.
_import_structure = {}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose no tokenizer symbols.
    pass
else:
    _import_structure['tokenization_bartpho'] = ['BartphoTokenizer']
if TYPE_CHECKING:
    # Static type checkers import eagerly instead of lazily.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    # At runtime, replace this module with a lazy proxy so submodules are
    # only imported on first attribute access.
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# (dataset, config) pairs mirrored on the HF GCP bucket. Every consumer in
# this file reads the name DATASETS_ON_HF_GCP; the original bound the list to
# the throwaway name `_lowercase`, so those reads raised NameError.
DATASETS_ON_HF_GCP = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def __UpperCamelCase ( with_config : bool = True ) -> list:
    """Build the parameter dicts fed to ``parameterized.named_parameters``.

    With ``with_config=True`` each (dataset, config) pair in
    ``DATASETS_ON_HF_GCP`` becomes one named test case; otherwise only the
    distinct dataset names are used.

    The original signature named the parameter ``a`` while the body read
    ``with_config`` (NameError on every call), and the return annotation said
    ``str`` although a list of dicts is returned.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) )
class _lowercase ( __a ):
    """Parameterized check that each dataset's info file is reachable on the
    HF GCP mirror.

    NOTE(review): locals are mangled to ``snake_case`` while later lines read
    ``dataset_module``, ``builder_cls``, ``builder_instance`` — verify against
    the upstream test before relying on behavior.
    """
    # Filled in per-parameterization by `parameterized` (dataset / config_name).
    _UpperCAmelCase = None
    _UpperCAmelCase = None
    def UpperCamelCase ( self , A__ , A__ ) -> str:
        """Resolve the builder, compose the GCS info URL, and fetch it."""
        with TemporaryDirectory() as tmp_dir:
            snake_case = dataset_module_factory(A__ , cache_dir=A__ )
            snake_case = import_main_class(dataset_module.module_path , dataset=A__ )
            snake_case = builder_cls(
                cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , )
            snake_case = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            snake_case = cached_path(A__ , cache_dir=A__ )
            self.assertTrue(os.path.exists(A__ ) )
@pytest.mark.integration
def __UpperCamelCase ( tmp_path_factory : Any ) ->Any:
    """Integration test: prepare ``wikipedia/20220301.frr`` from the HF GCP mirror.

    The original bound every intermediate to one throwaway name and then read
    the intended names (``tmp_path_factory``, ``dataset_module``,
    ``builder_cls``, ``builder_instance``), raising NameError; the parameter
    must be named ``tmp_path_factory`` for pytest fixture injection.
    """
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    # NOTE(review): the mangled original only shows `... = None`; upstream sets
    # builder_instance._download_and_prepare = None — confirm against upstream.
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __UpperCamelCase ( a : Any ) ->Union[str, Any]:
    """Integration test: stream ``wikipedia/20220301.frr`` as an iterable dataset.

    ``a`` is the pytest temporary cache directory. The original bound every
    intermediate to one throwaway name and then read ``dataset_module``,
    ``builder_cls``, ``builder_instance`` and ``ds`` (NameError); the final
    isinstance checks clearly target the IterableDatasetDict / IterableDataset
    types imported at the top of the file.
    """
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=a )
    # dataset=True: resolve the DatasetBuilder subclass for this module.
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''] , IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Config/inputs factory and shape-check helpers for the TF ViT tests.

    NOTE(review): every assignment in ``__init__`` binds to the same mangled
    name ``snake_case`` while the rest of the class reads ``self.<attr>`` —
    these were presumably ``self.<attr> = <arg>`` assignments upstream;
    verify before relying on behavior.
    """
    def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
        snake_case = parent
        snake_case = batch_size
        snake_case = image_size
        snake_case = patch_size
        snake_case = num_channels
        snake_case = is_training
        snake_case = use_labels
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = type_sequence_label_size
        snake_case = initializer_range
        snake_case = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case = (image_size // patch_size) ** 2
        snake_case = num_patches + 1
    def UpperCamelCase ( self ) -> int:
        """Create (config, pixel_values, labels); labels only when use_labels is set."""
        snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case = None
        if self.use_labels:
            snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case = self.get_config()
        return config, pixel_values, labels
    def UpperCamelCase ( self ) -> int:
        """Build a ViTConfig from the tester's hyperparameters."""
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        """Shape-check the base model, including interpolated position encodings."""
        snake_case = TFViTModel(config=A__ )
        snake_case = model(A__ , training=A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        snake_case = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
        """Shape-check the classification head, incl. resized and greyscale inputs."""
        snake_case = self.type_sequence_label_size
        snake_case = TFViTForImageClassification(A__ )
        snake_case = model(A__ , labels=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case = 1
        snake_case = TFViTForImageClassification(A__ )
        snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case = model(A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Build the (config, inputs_dict) pair consumed by the common test mixin."""
        snake_case = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case = config_and_inputs
        snake_case = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    """Model/pipeline test suite for TF ViT.

    NOTE(review): the class attributes are all bound to the same mangled name
    ``_UpperCAmelCase`` and locals to ``snake_case``; later lines read the
    originally intended names (``model``, ``signature``, ``arg_names``, ``x``)
    — verify against upstream before relying on behavior.
    """
    # Tuple of model classes under test (empty when TF is unavailable).
    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    # Pipeline-task -> model-class mapping for the pipeline mixin.
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> List[Any]:
        """Set up the model tester and the config tester (ViT has no text modality)."""
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def UpperCamelCase ( self ) -> int:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        pass
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Check input embeddings are a Keras layer and output embeddings are absent or a layer."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
    def UpperCamelCase ( self ) -> List[Any]:
        """Check the first positional argument of every model's call() is `pixel_values`."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Forward-pass shape check for the base model."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def UpperCamelCase ( self ) -> Optional[Any]:
        """Forward-pass shape check for the classification head."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    @slow
    def UpperCamelCase ( self ) -> Any:
        """Smoke-test loading the pretrained checkpoint from the Hub."""
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
    """Load the standard COCO test image used by the integration tests.

    Returns the opened PIL image. The original body bound the opened image to
    a throwaway name and then returned the unbound name ``image`` (NameError
    on every call).
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: classify the COCO cats image with pretrained TF ViT.

    NOTE(review): locals are mangled to ``snake_case`` while later lines read
    ``image_processor``, ``model``, ``outputs`` — verify against upstream.
    """
    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        # Image processor for the checkpoint, or None when vision deps are missing.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def UpperCamelCase ( self ) -> Dict:
        """End-to-end forward pass; checks logits shape and the first three values."""
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
|
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
while b:
snake_case , snake_case = b, a % b
return a
def __UpperCamelCase ( a : int , a : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(a , a % b )
def __UpperCamelCase ( ) ->Optional[Any]:
    """Demo driver: print GCD results for a few sample pairs.

    NOTE(review): this calls ``euclidean_gcd`` / ``euclidean_gcd_recursive``
    and the guard below calls ``main``, but no definitions with those names
    are visible in this file (all functions here are named
    ``__UpperCamelCase``) — presumably the names were mangled; verify against
    upstream.
    """
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
    main()
| 44
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    """Test suite for the GPTSAN-japanese tokenizer.

    NOTE(review): locals and class attributes are mangled (``snake_case`` /
    ``_UpperCAmelCase``) while later lines read the originally intended names
    (``vocab_tokens``, ``tokenizer``, ``tokens`` …) — verify against upstream
    before relying on behavior.
    """
    _UpperCAmelCase = GPTSanJapaneseTokenizer
    _UpperCAmelCase = False
    _UpperCAmelCase = {'''do_clean_text''': False, '''add_prefix_space''': False}
    def UpperCamelCase ( self ) -> int:
        """Write a tiny vocab + emoji mapping into the test tmp dir."""
        super().setUp()
        # fmt: off
        snake_case = ['''こん''', '''こんに''', '''にちは''', '''ばんは''', '''世界,㔺界''', '''、''', '''。''', '''<BR>''', '''<SP>''', '''<TAB>''', '''<URL>''', '''<EMAIL>''', '''<TEL>''', '''<DATE>''', '''<PRICE>''', '''<BLOCK>''', '''<KIGOU>''', '''<U2000U2BFF>''', '''<|emoji1|>''', '''<unk>''', '''<|bagoftoken|>''', '''<|endoftext|>''']
        # fmt: on
        snake_case = {'''emoji''': {'''\ud83d\ude00''': '''<|emoji1|>'''}, '''emoji_inv''': {'''<|emoji1|>''': '''\ud83d\ude00'''}} # 😀
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''emoji_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        with open(self.emoji_file , '''w''' ) as emoji_writer:
            emoji_writer.write(json.dumps(A__ ) )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        """Instantiate a tokenizer from the files written in setUp."""
        kwargs.update(self.special_tokens_map )
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , A__ ) -> Any:
        # Input/expected pair: 㔺 normalizes to 世 on decode.
        snake_case = '''こんにちは、世界。 \nこんばんは、㔺界。😀'''
        snake_case = '''こんにちは、世界。 \nこんばんは、世界。😀'''
        return input_text, output_text
    def UpperCamelCase ( self , A__ ) -> str:
        """Round-trip helper: encode the sample text, then decode it."""
        snake_case , snake_case = self.get_input_output_texts(A__ )
        snake_case = tokenizer.encode(A__ , add_special_tokens=A__ )
        snake_case = tokenizer.decode(A__ , clean_up_tokenization_spaces=A__ )
        return text, ids
    def UpperCamelCase ( self ) -> List[Any]:
        pass # TODO add if relevant
    def UpperCamelCase ( self ) -> int:
        pass # TODO add if relevant
    def UpperCamelCase ( self ) -> Union[str, Any]:
        pass # TODO add if relevant
    def UpperCamelCase ( self ) -> Dict:
        """Tokenization and token<->id conversion on the toy vocab."""
        snake_case = self.get_tokenizer()
        # Testing tokenization
        snake_case = '''こんにちは、世界。 こんばんは、㔺界。'''
        snake_case = ['''こん''', '''にちは''', '''、''', '''世界''', '''。''', '''<SP>''', '''こん''', '''ばんは''', '''、''', '''㔺界''', '''。''']
        snake_case = tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids without special tokens
        snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        snake_case = tokenizer.convert_tokens_to_ids(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids with special tokens
        snake_case = tokens + [tokenizer.unk_token]
        snake_case = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        snake_case = tokenizer.convert_tokens_to_ids(A__ )
        self.assertListEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> Dict:
        """<|bagoftoken|> expands to repeated readings on decode."""
        snake_case = self.get_tokenizer()
        # Testing tokenization
        snake_case = '''こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'''
        snake_case = '''こんにちは、、、、世界。こんばんは、、、、世界。'''
        snake_case = tokenizer.encode(A__ )
        snake_case = tokenizer.decode(A__ )
        self.assertEqual(A__ , A__ )
    @slow
    def UpperCamelCase ( self ) -> List[Any]:
        """Prefix text and plain concatenation must decode to the same string."""
        snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        snake_case = '''こんにちは、世界。'''
        snake_case = '''こんばんは、㔺界。😀'''
        snake_case = '''こんにちは、世界。こんばんは、世界。😀'''
        snake_case = tokenizer.encode(prefix_text + input_text )
        snake_case = tokenizer.encode('''''' , prefix_text=prefix_text + input_text )
        snake_case = tokenizer.encode(A__ , prefix_text=A__ )
        snake_case = tokenizer.decode(A__ )
        snake_case = tokenizer.decode(A__ )
        snake_case = tokenizer.decode(A__ )
        self.assertEqual(A__ , A__ )
        self.assertEqual(A__ , A__ )
        self.assertEqual(A__ , A__ )
    @slow
    def UpperCamelCase ( self ) -> int:
        """token_type_ids must mark the prefix segment consistently."""
        snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        # Testing tokenization
        snake_case = '''こんにちは、世界。'''
        snake_case = '''こんばんは、㔺界。😀'''
        snake_case = len(tokenizer.encode(A__ ) ) - 2
        snake_case = len(tokenizer.encode(A__ ) ) - 2
        snake_case = [1] + [0] * (len_prefix + len_text + 1)
        snake_case = [1] * (len_prefix + len_text + 1) + [0]
        snake_case = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        snake_case = tokenizer(prefix_text + input_text ).token_type_ids
        snake_case = tokenizer('''''' , prefix_text=prefix_text + input_text ).token_type_ids
        snake_case = tokenizer(A__ , prefix_text=A__ ).token_type_ids
        self.assertListEqual(A__ , A__ )
        self.assertListEqual(A__ , A__ )
        self.assertListEqual(A__ , A__ )
    @slow
    def UpperCamelCase ( self ) -> Optional[Any]:
        """SEG-token placement differs between prefix and non-prefix encodings."""
        snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        snake_case = tokenizer.encode('''あンいワ''' )
        snake_case = tokenizer.encode('''''' , prefix_text='''あンいワ''' )
        snake_case = tokenizer.encode('''いワ''' , prefix_text='''あン''' )
        self.assertEqual(tokenizer.decode(A__ ) , tokenizer.decode(A__ ) )
        self.assertEqual(tokenizer.decode(A__ ) , tokenizer.decode(A__ ) )
        self.assertNotEqual(A__ , A__ )
        self.assertNotEqual(A__ , A__ )
        self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
        self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
    @slow
    def UpperCamelCase ( self ) -> List[Any]:
        """Batch encoding with padding: ids, token_type_ids and attention_mask."""
        snake_case = self.tokenizer_class.from_pretrained('''Tanrei/GPTSAN-japanese''' )
        snake_case = [['''武田信玄''', '''は、'''], ['''織田信長''', '''の配下の、''']]
        snake_case = tokenizer(A__ , padding=A__ )
        snake_case = tokenizer.batch_encode_plus(A__ , padding=A__ )
        # fmt: off
        snake_case = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
        snake_case = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        snake_case = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids , A__ )
        self.assertListEqual(x_token.token_type_ids , A__ )
        self.assertListEqual(x_token.attention_mask , A__ )
        self.assertListEqual(x_token_a.input_ids , A__ )
        self.assertListEqual(x_token_a.token_type_ids , A__ )
        self.assertListEqual(x_token_a.attention_mask , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass
    def UpperCamelCase ( self ) -> Optional[int]:
        # tokenizer has no padding token
        pass
| 44
|
'''simple docstring'''
import argparse
import copy
def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple:
snake_case = {}
with open(a ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[1], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
snake_case = []
_list.append([line.split()[0], line.split()[2]] )
snake_case = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def __UpperCamelCase ( a : Dict , a : Tuple ) ->int:
with open(a ) as f:
snake_case = f.read(1 )
snake_case = start_node
snake_case = []
snake_case = start_node
snake_case = 0
while visiting not in first_solution:
snake_case = 1_0000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(a ) and k[0] not in first_solution:
snake_case = k[1]
snake_case = k[0]
first_solution.append(a )
snake_case = distance_of_first_solution + int(a )
snake_case = best_node
first_solution.append(a )
snake_case = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
snake_case = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0000
)
return first_solution, distance_of_first_solution
def __UpperCamelCase ( a : Optional[int] , a : str ) ->str:
snake_case = []
for n in solution[1:-1]:
snake_case = solution.index(a )
for kn in solution[1:-1]:
snake_case = solution.index(a )
if n == kn:
continue
snake_case = copy.deepcopy(a )
snake_case = kn
snake_case = n
snake_case = 0
for k in _tmp[:-1]:
snake_case = _tmp[_tmp.index(a ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
snake_case = distance + int(i[1] )
_tmp.append(a )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
snake_case = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def __UpperCamelCase ( first_solution : list , distance_of_first_solution : int , dict_of_neighbours : dict , iters : int , size : int ) -> tuple:
    """Run tabu search for ``iters`` iterations with a tabu list of ``size``.

    Starting from the greedy tour, repeatedly moves to the best non-tabu
    2-swap neighbour (accepting worse moves to escape local minima) and
    tracks the best tour seen. Returns ``(best_solution_ever, best_cost)``.

    The original declared all five parameters as ``a`` (SyntaxError) and
    bound every intermediate to one throwaway name while reading the
    intended names. It relies on a module-level ``find_neighborhood`` helper.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution , dict_of_neighbours )
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution ) - 1
        found = False
        while not found:
            # Locate the first position where the candidate differs from the
            # current tour — that pair of nodes identifies the swap.
            i = 0
            while i < len(best_solution ):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node] )
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Swap is tabu: try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list ) >= size:
            tabu_list.pop(0 )
        count = count + 1
    return best_solution_ever, best_cost
def __UpperCamelCase ( args : Union[str, Any]=None ) ->Optional[Any]:
    """Entry point: build the graph, seed a greedy tour, run tabu search, print.

    The original named the parameter ``a`` while the body read ``args``
    (NameError), and bound intermediates to one throwaway name. It relies on
    the module-level helpers ``generate_neighbours``,
    ``generate_first_solution`` and ``tabu_search``.
    """
    dict_of_neighbours = generate_neighbours(args.File )
    first_solution , distance_of_first_solution = generate_first_solution(
        args.File , dict_of_neighbours )
    best_sol , best_cost = tabu_search(
        first_solution , distance_of_first_solution , dict_of_neighbours , args.Iterations , args.Size , )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
    # Build the CLI. The original bound the ArgumentParser to the throwaway
    # name `_lowercase` and then called methods on the undefined name
    # `parser` (NameError as soon as the script was executed directly).
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 44
| 1
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mvp import MvpTokenizer
# NOTE(review): the module constants below are all rebound to the same
# mangled name `_lowercase`, yet the tokenizer class reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES` — verify against upstream.
_lowercase = logging.get_logger(__name__)
# Expected tokenizer resource file names.
_lowercase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all MVP models at https://huggingface.co/models?filter=mvp
_lowercase = {
    'vocab_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json',
    },
    'added_tokens.json': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json',
    },
    'merges_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'RUCAIBox/mvp': 'https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json',
    },
}
# Maximum positional-embedding sizes per checkpoint.
_lowercase = {
    'RUCAIBox/mvp': 1_024,
}
class _lowercase ( __a ):
    """Fast (tokenizers-backed) MVP tokenizer.

    NOTE(review): class attributes and locals are mangled (``_UpperCAmelCase``
    / ``snake_case``) while later lines read the intended names
    (``pre_tok_state``, ``state``, ``changes_to_apply`` …), and the
    ``@mask_token.setter`` decorator has no matching ``mask_token`` property
    name — verify against the upstream MvpTokenizerFast source.
    """
    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = ['''input_ids''', '''attention_mask''']
    _UpperCAmelCase = MvpTokenizer
    def __init__( self , A__=None , A__=None , A__=None , A__="replace" , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=False , A__=True , **A__ , ) -> Optional[int]:
        """Initialize and sync add_prefix_space / trim_offsets into the backend."""
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
        snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , A__ ) != add_prefix_space:
            # Rebuild the pre-tokenizer with the requested add_prefix_space.
            snake_case = getattr(A__ , pre_tok_state.pop('''type''' ) )
            snake_case = add_prefix_space
            snake_case = pre_tok_class(**A__ )
        snake_case = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case = '''post_processor'''
        snake_case = getattr(self.backend_tokenizer , A__ , A__ )
        if tokenizer_component_instance:
            snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case = tuple(state['''sep'''] )
            if "cls" in state:
                snake_case = tuple(state['''cls'''] )
            snake_case = False
            if state.get('''add_prefix_space''' , A__ ) != add_prefix_space:
                snake_case = add_prefix_space
                snake_case = True
            if state.get('''trim_offsets''' , A__ ) != trim_offsets:
                snake_case = trim_offsets
                snake_case = True
            if changes_to_apply:
                # Rebuild the post-processor only if a setting actually changed.
                snake_case = getattr(A__ , state.pop('''type''' ) )
                snake_case = component_class(**A__ )
                setattr(self.backend_tokenizer , A__ , A__ )
    @property
    def UpperCamelCase ( self ) -> str:
        """Return the mask token as a string, or None (with a log) if unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        """Set the mask token; plain strings become lstrip'd AddedTokens."""
        snake_case = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
        snake_case = value
    def UpperCamelCase ( self , *A__ , **A__ ) -> BatchEncoding:
        """Batch encode; pretokenized input requires add_prefix_space=True."""
        snake_case = kwargs.get('''is_split_into_words''' , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*A__ , **A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> BatchEncoding:
        """Encode a single example; pretokenized input requires add_prefix_space=True."""
        snake_case = kwargs.get('''is_split_into_words''' , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*A__ , **A__ )
    def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
        """Save the backend vocabulary files; returns the written paths."""
        snake_case = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )
    def UpperCamelCase ( self , A__ , A__=None ) -> List[str]:
        """Build model input: <s> ids </s> (+ </s> ids2 </s> for a pair)."""
        snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
        """Token type ids are all zeros (MVP does not use segment ids)."""
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 44
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Import the real pipeline components only when torch + transformers are
# present; otherwise fall back to dummy placeholder objects.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )
# MIDI utilities additionally require note_seq.
try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
| 44
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# NOTE(review): both module constants below are bound to the same mangled
# name `_lowercase` (logger, then the checkpoint->config-URL map) — verify
# the intended names against the upstream Funnel configuration module.
_lowercase = logging.get_logger(__name__)
_lowercase = {
    'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
    'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
    'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
    'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
    'funnel-transformer/intermediate': (
        'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
    ),
    'funnel-transformer/intermediate-base': (
        'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
    ),
    'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
    'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
    'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
    'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class _lowercase ( __a ):
    """Configuration class for Funnel Transformer models.

    NOTE(review): every constructor assignment binds to the mangled name
    ``snake_case`` (presumably ``self.<attr> = …`` upstream), and the
    ``@num_hidden_layers.setter`` / ``@num_blocks.setter`` decorators have no
    matching property names here — verify against the upstream FunnelConfig.
    """
    _UpperCAmelCase = '''funnel'''
    # Map common config attribute names onto Funnel's own names.
    _UpperCAmelCase = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''n_head''',
    }
    def __init__( self , A__=3_05_22 , A__=[4, 4, 4] , A__=None , A__=2 , A__=7_68 , A__=12 , A__=64 , A__=30_72 , A__="gelu_new" , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.1 , A__=None , A__=1e-9 , A__="mean" , A__="relative_shift" , A__=True , A__=True , A__=True , **A__ , ) -> str:
        snake_case = vocab_size
        snake_case = block_sizes
        # Default: each block repeated once.
        snake_case = [1] * len(A__ ) if block_repeats is None else block_repeats
        assert len(A__ ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        snake_case = num_decoder_layers
        snake_case = d_model
        snake_case = n_head
        snake_case = d_head
        snake_case = d_inner
        snake_case = hidden_act
        snake_case = hidden_dropout
        snake_case = attention_dropout
        snake_case = activation_dropout
        snake_case = initializer_range
        snake_case = initializer_std
        snake_case = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        snake_case = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        snake_case = attention_type
        snake_case = separate_cls
        snake_case = truncate_seq
        snake_case = pool_q_only
        super().__init__(**A__ )
    @property
    def UpperCamelCase ( self ) -> Any:
        # Total encoder layers = sum of per-block layer counts.
        return sum(self.block_sizes )
    @num_hidden_layers.setter
    def UpperCamelCase ( self , A__ ) -> Tuple:
        raise NotImplementedError(
            '''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
    @property
    def UpperCamelCase ( self ) -> Tuple:
        # Number of encoder blocks.
        return len(self.block_sizes )
    @num_blocks.setter
    def UpperCamelCase ( self , A__ ) -> List[str]:
        raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 44
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( __a ):
    """Whisper processor: bundles a feature extractor (audio -> input
    features) and a tokenizer (text -> token ids) behind one callable.

    Fixes over the original block:
    - several signatures repeated the parameter name ``A__`` (SyntaxError);
      names are reconstructed from how each value is used in the body;
    - every local was assigned to the same name ``snake_case``, so the
      audio/sampling-rate/text values clobbered each other;
    - when both audio and text were given, the tokenized ids were computed
      but never attached to the returned inputs; they are now stored under
      ``"labels"`` before returning.

    NOTE(review): the class attributes below were both named
    ``_UpperCAmelCase`` in the original (the second clobbered the first);
    the names used here are the ones ProcessorMixin reads — verify against
    the unmangled original.
    """

    # Classes instantiated by ProcessorMixin for this processor.
    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''

    def __init__(self, feature_extractor, tokenizer) -> None:
        super().__init__(feature_extractor, tokenizer)
        # Default to the feature extractor; `_in_target_context_manager` is
        # flipped by a target-processor context manager defined elsewhere.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def UpperCamelCase(self, task=None, language=None, no_timestamps=True) -> Union[str, Any]:
        """Forward to the tokenizer's ``get_decoder_prompt_ids``."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs) -> Dict:
        """Dispatch to the feature extractor (audio) and/or tokenizer (text).

        For backward compatibility the first positional argument is treated
        as ``audio``.
        """
        # For backward compatibility with the target-processor context manager.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Attach the tokenized text as labels (the original computed the
            # ids but dropped them before returning).
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def UpperCamelCase(self, *args, **kwargs) -> Optional[Any]:
        """Forward to the tokenizer's ``batch_decode``.

        NOTE(review): every helper here shares the obfuscated name
        ``UpperCamelCase``, so later definitions shadow earlier ones.
        """
        return self.tokenizer.batch_decode(*args , **kwargs )

    def UpperCamelCase(self, *args, **kwargs) -> str:
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    def UpperCamelCase(self, text, return_tensors="np") -> Optional[Any]:
        """Forward to the tokenizer's ``get_prompt_ids``."""
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
| 44
| 1
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowercase :
    """Test helper that builds a tiny Mask2Former config and random inputs.

    NOTE(review): names are obfuscated — every ``__init__`` parameter is
    ``A__`` (a SyntaxError as written) and every local is ``snake_case``, so
    the bodies below read/assign names that are never actually bound. The
    intended names can be inferred from the attribute reads (``self.parent``,
    ``self.batch_size``, ...); verify against the unmangled original.
    """

    def __init__( self , A__ , A__=2 , A__=True , A__=False , A__=10 , A__=3 , A__=32 * 8 , A__=32 * 8 , A__=4 , A__=64 , ) -> Union[str, Any]:
        # Intended attributes (per the reads elsewhere in this class):
        # parent, batch_size, is_training, use_auxiliary_loss, num_queries,
        # num_channels, min_size, max_size, num_labels, hidden_dim.
        snake_case = parent
        snake_case = batch_size
        snake_case = is_training
        snake_case = use_auxiliary_loss
        snake_case = num_queries
        snake_case = num_channels
        snake_case = min_size
        snake_case = max_size
        snake_case = num_labels
        snake_case = hidden_dim
        snake_case = hidden_dim

    def UpperCamelCase ( self ) -> Optional[int]:
        """Build (config, pixel_values, pixel_mask, mask_labels, class_labels)."""
        snake_case = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            A__ )
        snake_case = torch.ones([self.batch_size, self.min_size, self.max_size] , device=A__ )
        # Random binary masks per label, as floats.
        snake_case = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=A__ ) > 0.5
        ).float()
        snake_case = (torch.rand((self.batch_size, self.num_labels) , device=A__ ) > 0.5).long()
        snake_case = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def UpperCamelCase ( self ) -> Optional[Any]:
        """Build a minimal MaskaFormerConfig sized for fast tests."""
        snake_case = MaskaFormerConfig(
            hidden_size=self.hidden_dim , )
        snake_case = self.num_queries
        snake_case = self.num_labels
        snake_case = [1, 1, 1, 1]
        snake_case = self.num_channels
        snake_case = 64
        snake_case = 1_28
        snake_case = self.hidden_dim
        snake_case = self.hidden_dim
        snake_case = self.hidden_dim
        return config

    def UpperCamelCase ( self ) -> Dict:
        """Return (config, inputs_dict) for the common test mixin."""
        snake_case , snake_case , snake_case , snake_case , snake_case = self.prepare_config_and_inputs()
        snake_case = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
        return config, inputs_dict

    def UpperCamelCase ( self , A__ , A__ ) -> Union[str, Any]:
        """Check the hidden-state tuples have the expected lengths."""
        snake_case = output.encoder_hidden_states
        snake_case = output.pixel_decoder_hidden_states
        snake_case = output.transformer_decoder_hidden_states
        self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(A__ ) , len(config.backbone_config.depths ) )
        self.parent.assertTrue(len(A__ ) , config.decoder_layers )

    def UpperCamelCase ( self , A__ , A__ , A__ , A__=False ) -> str:
        """Run the base model forward and check output shapes."""
        with torch.no_grad():
            snake_case = MaskaFormerModel(config=A__ )
            model.to(A__ )
            model.eval()
            snake_case = model(pixel_values=A__ , pixel_mask=A__ )
            snake_case = model(A__ , output_hidden_states=A__ )
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
        self.parent.assertTrue(output.encoder_last_hidden_state is not None )
        if output_hidden_states:
            self.check_output_hidden_state(A__ , A__ )

    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> List[str]:
        """Run the segmentation head model, with and without labels."""
        snake_case = MaskaFormerForUniversalSegmentation(config=A__ )
        model.to(A__ )
        model.eval()

        def comm_check_on_output(A__ ):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
            self.parent.assertTrue(result.encoder_last_hidden_state is not None )
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )

        with torch.no_grad():
            snake_case = model(pixel_values=A__ , pixel_mask=A__ )
            snake_case = model(A__ )
        comm_check_on_output(A__ )
        snake_case = model(
            pixel_values=A__ , pixel_mask=A__ , mask_labels=A__ , class_labels=A__ )
        comm_check_on_output(A__ )
        self.parent.assertTrue(result.loss is not None )
        self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowercase ( __a , __a , unittest.TestCase ):
    """Common model tests for Mask2Former (base model + universal segmentation head).

    NOTE(review): the class-level flags below were all assigned to the same
    obfuscated name ``_UpperCAmelCase`` (later assignments clobber earlier
    ones), and every method shares the name ``UpperCamelCase`` — verify
    against the unmangled original before relying on any of them.
    """

    _UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
    _UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def UpperCamelCase ( self ) -> str:
        # Set up the model tester and config tester used by the common tests.
        snake_case = MaskaFormerModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ )

    def UpperCamelCase ( self ) -> Optional[Any]:
        self.config_tester.run_common_tests()

    def UpperCamelCase ( self ) -> str:
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ )

    def UpperCamelCase ( self ) -> str:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A__ )

    @unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> Dict:
        pass

    @unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
    def UpperCamelCase ( self ) -> List[Any]:
        pass

    @unittest.skip(reason='''Mask2Former is not a generative model''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass

    @unittest.skip(reason='''Mask2Former does not use token embeddings''' )
    def UpperCamelCase ( self ) -> List[str]:
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
    def UpperCamelCase ( self ) -> List[Any]:
        pass

    @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
    def UpperCamelCase ( self ) -> str:
        pass

    def UpperCamelCase ( self ) -> str:
        # Check the forward signature starts with `pixel_values`.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )

    @slow
    def UpperCamelCase ( self ) -> List[str]:
        # Smoke-test loading a pretrained checkpoint.
        for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            snake_case = MaskaFormerModel.from_pretrained(A__ )
            self.assertIsNotNone(A__ )

    def UpperCamelCase ( self ) -> Dict:
        # Training forward with labels should produce a loss.
        snake_case = (self.model_tester.min_size,) * 2
        snake_case = {
            '''pixel_values''': torch.randn((2, 3, *size) , device=A__ ),
            '''mask_labels''': torch.randn((2, 10, *size) , device=A__ ),
            '''class_labels''': torch.zeros(2 , 10 , device=A__ ).long(),
        }
        snake_case = self.model_tester.get_config()
        snake_case = MaskaFormerForUniversalSegmentation(A__ ).to(A__ )
        snake_case = model(**A__ )
        self.assertTrue(outputs.loss is not None )

    def UpperCamelCase ( self ) -> Optional[int]:
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskaformer_model(A__ , **A__ , output_hidden_states=A__ )

    def UpperCamelCase ( self ) -> List[Any]:
        # Attention outputs should be returned when requested.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ ).to(A__ )
            snake_case = model(**A__ , output_attentions=A__ )
            self.assertTrue(outputs.attentions is not None )

    def UpperCamelCase ( self ) -> int:
        # Backward pass through the loss must not error.
        if not self.model_tester.is_training:
            return
        snake_case = self.all_model_classes[1]
        snake_case , snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
        snake_case = model_class(A__ )
        model.to(A__ )
        model.train()
        snake_case = model(A__ , mask_labels=A__ , class_labels=A__ ).loss
        loss.backward()

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Gradients must flow to all retained intermediate activations.
        snake_case = self.all_model_classes[1]
        snake_case , snake_case , snake_case , snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
        snake_case = True
        snake_case = True
        snake_case = model_class(A__ ).to(A__ )
        model.train()
        snake_case = model(A__ , mask_labels=A__ , class_labels=A__ )
        snake_case = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()
        snake_case = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        snake_case = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()
        snake_case = outputs.attentions[0]
        attentions.retain_grad()
        outputs.loss.backward(retain_graph=A__ )
        self.assertIsNotNone(encoder_hidden_states.grad )
        self.assertIsNotNone(pixel_decoder_hidden_states.grad )
        self.assertIsNotNone(transformer_decoder_hidden_states.grad )
        self.assertIsNotNone(attentions.grad )
# Absolute tolerance used by the slow integration tests' tensor comparisons.
_lowercase = 1E-4
def __UpperCamelCase ( ) ->Union[str, Any]:
    """Load the COCO sample image used as a fixture by the integration tests below."""
    return Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
@require_vision
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests for Mask2Former against a pretrained checkpoint.

    NOTE(review): every method here shares the obfuscated name
    ``UpperCamelCase`` and every local is ``snake_case`` (later assignments
    clobber earlier ones), so the bodies reference names that are never
    bound as written — verify against the unmangled original.
    """

    @cached_property
    def UpperCamelCase ( self ) -> Tuple:
        # Checkpoint used by all tests in this class.
        return "facebook/mask2former-swin-small-coco-instance"

    @cached_property
    def UpperCamelCase ( self ) -> str:
        # Image processor for the checkpoint, or None when vision deps are missing.
        return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None

    def UpperCamelCase ( self ) -> Optional[int]:
        # Base model: check input sizing and a 3x3 slice of each output tensor.
        snake_case = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A__ )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(A__ , return_tensors='''pt''' ).to(A__ )
        snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(A__ , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            snake_case = model(**A__ )
        snake_case = torch.tensor(
            [[-0.2_7_9_0, -1.0_7_1_7, -1.1_6_6_8], [-0.5_1_2_8, -0.3_1_2_8, -0.4_9_8_7], [-0.5_8_3_2, 0.1_9_7_1, -0.0_1_9_7]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3] , A__ , atol=A__ ) )
        snake_case = torch.tensor(
            [[0.8_9_7_3, 1.1_8_4_7, 1.1_7_7_6], [1.1_9_3_4, 1.5_0_4_0, 1.5_1_2_8], [1.1_1_5_3, 1.4_4_8_6, 1.4_9_5_1]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , A__ , atol=A__ ) )
        snake_case = torch.tensor(
            [[2.1_1_5_2, 1.7_0_0_0, -0.8_6_0_3], [1.5_8_0_8, 1.8_0_0_4, -0.9_3_5_3], [1.6_0_4_3, 1.7_4_9_5, -0.5_9_9_9]] ).to(A__ )
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3] , A__ , atol=A__ ) )

    def UpperCamelCase ( self ) -> str:
        # Segmentation head: check the logits shapes and reference slices.
        snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval()
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(A__ , return_tensors='''pt''' ).to(A__ )
        snake_case = inputs['''pixel_values'''].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
        # check size
        self.assertEqual(A__ , (1, 3, 3_84, 3_84) )
        with torch.no_grad():
            snake_case = model(**A__ )
        # masks_queries_logits
        snake_case = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
        snake_case = [
            [-8.7_8_3_9, -9.0_0_5_6, -8.8_1_2_1],
            [-7.4_1_0_4, -7.0_3_1_3, -6.5_4_0_1],
            [-6.6_1_0_5, -6.3_4_2_7, -6.4_6_7_5],
        ]
        snake_case = torch.tensor(A__ ).to(A__ )
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , A__ , atol=A__ ) )
        # class_queries_logits
        snake_case = outputs.class_queries_logits
        self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
        snake_case = torch.tensor(
            [
                [1.8_3_2_4, -8.0_8_3_5, -4.1_9_2_2],
                [0.8_4_5_0, -9.0_0_5_0, -3.6_0_5_3],
                [0.3_0_4_5, -7.7_2_9_3, -3.0_2_7_5],
            ] ).to(A__ )
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , A__ , atol=A__ ) )

    def UpperCamelCase ( self ) -> List[str]:
        # With segmentation maps supplied, the forward pass must yield a loss.
        snake_case = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A__ ).eval()
        snake_case = self.default_image_processor
        snake_case = image_processor(
            [np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.floataa ), np.zeros((3_84, 3_84) ).astype(np.floataa )] , return_tensors='''pt''' , )
        snake_case = inputs['''pixel_values'''].to(A__ )
        snake_case = [el.to(A__ ) for el in inputs['''mask_labels''']]
        snake_case = [el.to(A__ ) for el in inputs['''class_labels''']]
        with torch.no_grad():
            snake_case = model(**A__ )
        self.assertTrue(outputs.loss is not None )
| 44
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowercase ( __a ):
    """Decoding strategies supported by the MGP-STR processor.

    The members are referenced elsewhere in this file as
    ``DecodeType.CHARACTER``, ``DecodeType.BPE`` and ``DecodeType.WORDPIECE``
    (see the tuple right below and the `_decode_helper` dispatch), so they
    must carry those names. The original assigned all three values to the
    same obfuscated name, leaving only the last one visible.
    """

    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
# Tuple of the supported decode types, in the order the processor tries them.
# NOTE(review): `DecodeType` presumably refers to the class defined just
# above (obfuscated here as `_lowercase`) — verify against the original.
_lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowercase ( __a ):
    """MGP-STR processor: wraps a ViT image processor and three tokenizers
    (character, GPT-2 BPE, BERT wordpiece) and fuses their decodings.

    NOTE(review): names are obfuscated — several signatures repeat ``A__``
    (a SyntaxError as written) and every local is ``snake_case``, so the
    bodies read names that are never bound; intended names can be inferred
    from the reads. Verify against the unmangled original.
    """

    _UpperCAmelCase = ['''image_processor''', '''char_tokenizer''']
    _UpperCAmelCase = '''ViTImageProcessor'''
    _UpperCAmelCase = '''MgpstrTokenizer'''

    def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]:
        # Accept the deprecated `feature_extractor` kwarg as a fallback for
        # `image_processor`; build the auxiliary BPE/wordpiece tokenizers.
        snake_case = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , A__ , )
            snake_case = kwargs.pop('''feature_extractor''' )
        snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        snake_case = tokenizer
        snake_case = AutoTokenizer.from_pretrained('''gpt2''' )
        snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(A__ , A__ )

    def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]:
        """Process images (image processor) and/or text (char tokenizer)."""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ )
        if text is not None:
            snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs

    def UpperCamelCase ( self , A__ ) -> Dict:
        """Decode (char, bpe, wp) logit triplets and keep the best-scoring
        string per sample, returning all per-head decodings as well."""
        snake_case , snake_case , snake_case = sequences
        snake_case = char_preds.size(0 )
        snake_case , snake_case = self._decode_helper(A__ , '''char''' )
        snake_case , snake_case = self._decode_helper(A__ , '''bpe''' )
        snake_case , snake_case = self._decode_helper(A__ , '''wp''' )
        snake_case = []
        snake_case = []
        for i in range(A__ ):
            # Pick the head with the highest confidence for this sample.
            snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]]
            snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]]
            snake_case = scores.index(max(A__ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        snake_case = {}
        snake_case = final_strs
        snake_case = final_scores
        snake_case = char_strs
        snake_case = bpe_strs
        snake_case = wp_strs
        return out

    def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]:
        """Greedy-decode one head's logits into strings + confidence scores.

        Dispatches on the decode format to pick the decoder function, the
        end-of-sequence token id and its string form.
        """
        if format == DecodeType.CHARACTER:
            snake_case = self.char_decode
            snake_case = 1
            snake_case = '''[s]'''
        elif format == DecodeType.BPE:
            snake_case = self.bpe_decode
            snake_case = 2
            snake_case = '''#'''
        elif format == DecodeType.WORDPIECE:
            snake_case = self.wp_decode
            snake_case = 1_02
            snake_case = '''[SEP]'''
        else:
            raise ValueError(F"""Format {format} is not supported.""" )
        snake_case , snake_case = [], []
        snake_case = pred_logits.size(0 )
        snake_case = pred_logits.size(1 )
        snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ )
        # Drop the first position (BOS) before decoding.
        snake_case = preds_index.view(-1 , A__ )[:, 1:]
        snake_case = decoder(A__ )
        snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 )
        snake_case = preds_max_prob[:, 1:]
        for index in range(A__ ):
            # Truncate each decoded string at its EOS marker and score it by
            # the cumulative product of per-step max probabilities.
            snake_case = preds_str[index].find(A__ )
            snake_case = preds_str[index][:pred_eos]
            snake_case = preds_index[index].cpu().tolist()
            snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1
            snake_case = preds_max_prob[index][: pred_eos_index + 1]
            snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(A__ )
            conf_scores.append(A__ )
        return dec_strs, conf_scores

    def UpperCamelCase ( self , A__ ) -> int:
        # Character-level decode: strip the spaces the tokenizer inserts.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )]
        return decode_strs

    def UpperCamelCase ( self , A__ ) -> List[str]:
        # BPE decode via the GPT-2 tokenizer.
        return self.bpe_tokenizer.batch_decode(A__ )

    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        # Wordpiece decode: strip the spaces the tokenizer inserts.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )]
        return decode_strs
| 44
| 1
|
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( __a ):
    """Unconditional DDIM image-generation pipeline (UNet + DDIM scheduler).

    NOTE(review): locals are obfuscated to ``snake_case`` and parameters to
    ``A__``, so the body reads names that are never bound as written; the
    intended names (batch_size, generator, eta, num_inference_steps,
    output_type, return_dict, image, ...) are inferred from usage — verify
    against the unmangled original.
    """

    def __init__( self , A__ , A__ ) -> str:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        snake_case = DDIMScheduler.from_config(scheduler.config )
        self.register_modules(unet=A__ , scheduler=A__ )

    @torch.no_grad()
    def __call__( self , A__ = 1 , A__ = None , A__ = 0.0 , A__ = 50 , A__ = None , A__ = "pil" , A__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate images from pure noise via the DDIM denoising loop."""
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size , A__ ):
            snake_case = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            snake_case = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(A__ , A__ ) and len(A__ ) != batch_size:
            raise ValueError(
                F"""You have passed a list of generators of length {len(A__ )}, but requested an effective batch"""
                F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
        snake_case = randn_tensor(A__ , generator=A__ , device=self.device , dtype=self.unet.dtype )
        # set step values
        self.scheduler.set_timesteps(A__ )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # 1. predict noise model_output
            snake_case = self.unet(A__ , A__ ).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            snake_case = self.scheduler.step(
                A__ , A__ , A__ , eta=A__ , use_clipped_model_output=A__ , generator=A__ ).prev_sample
        # Map from [-1, 1] to [0, 1] and move to channels-last numpy.
        snake_case = (image / 2 + 0.5).clamp(0 , 1 )
        snake_case = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            snake_case = self.numpy_to_pil(A__ )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=A__ )
| 44
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): three module flags initialized to False; all three names are
# obfuscated to `_lowercase`, so as written only one binding survives —
# verify the intended flag names against the unmangled original.
_lowercase , _lowercase , _lowercase = False, False, False
@dataclass
class _lowercase :
    """The `Audio` feature type for datasets: encodes audio samples to a
    `{bytes, path}` Arrow struct and decodes them back to
    `{path, array, sampling_rate}` dicts.

    NOTE(review): names are obfuscated — every field is ``_UpperCAmelCase``
    and every local is ``snake_case`` (later assignments clobber earlier
    ones), so the bodies read names that are never bound as written; the
    intended names are inferred from usage. Verify against the unmangled
    original.
    """

    # Intended fields (per the reads below): sampling_rate, mono, decode, id.
    _UpperCAmelCase = None
    _UpperCAmelCase = True
    _UpperCAmelCase = True
    _UpperCAmelCase = None
    # Automatically constructed
    _UpperCAmelCase = "dict"
    _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a )

    def __call__( self ) -> Optional[Any]:
        # The feature's Arrow storage type.
        return self.pa_type

    def UpperCamelCase ( self , A__ ) -> dict:
        """Encode one example (str path, raw bytes, or array dict) to the
        `{bytes, path}` storage dict."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(A__ , A__ ):
            return {"bytes": None, "path": value}
        elif isinstance(A__ , A__ ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            snake_case = BytesIO()
            sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
                else:
                    snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
                snake_case = BytesIO(bytes() )
                sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )

    def UpperCamelCase ( self , A__ , A__ = None ) -> dict:
        """Decode a stored `{bytes, path}` dict to
        `{path, array, sampling_rate}`, resampling/downmixing as configured."""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
        snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
        snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        if file is None:
            # Streaming case: resolve a per-repo auth token and open the path.
            snake_case = token_per_repo_id or {}
            snake_case = path.split('''::''' )[-1]
            try:
                snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id''']
                snake_case = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                snake_case = None
            with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f:
                snake_case , snake_case = sf.read(A__ )
        else:
            snake_case , snake_case = sf.read(A__ )
        snake_case = array.T
        if self.mono:
            snake_case = librosa.to_mono(A__ )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate )
            snake_case = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to plain Value features (only legal when decode is off)."""
        from .features import Value
        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''' )
        return {
            "bytes": Value('''binary''' ),
            "path": Value('''string''' ),
        }

    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Cast arbitrary Arrow storage (string, binary, array-struct or
        `{bytes, path}` struct) to this feature's storage type."""
        if pa.types.is_string(storage.type ):
            snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
            snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                snake_case = storage.field('''bytes''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                snake_case = storage.field('''path''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        return array_cast(A__ , self.pa_type )

    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Embed file contents into storage: read each path's bytes and keep
        only the basename as the path."""
        @no_op_if_value_is_null
        def path_to_bytes(A__ ):
            with xopen(A__ , '''rb''' ) as f:
                snake_case = f.read()
            return bytes_
        snake_case = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        snake_case = pa.array(
            [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(A__ , self.pa_type )
| 44
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def __UpperCamelCase ( ) ->List[str]:
    """Entry point of the `transformers-cli` tool.

    Builds the argument parser, registers every sub-command on the
    sub-parser, parses argv and runs the selected command.

    Fixes over the original block: every `register_subcommand` call passed an
    undefined name `a` (now the sub-parser object), the locals were all
    clobbered into the same obfuscated name, and the `__main__` guard called
    an undefined `main()`.
    """
    parser = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''transformers-cli command helpers''' )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        # No sub-command given: show usage and exit with an error status.
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()


if __name__ == "__main__":
    # Call the entry point defined above (the original called an undefined `main()`).
    __UpperCamelCase()
| 44
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    # Stand-in used when vision dependencies are unavailable so the module
    # still imports; the hook is a no-op.
    class _lowercase :
        @staticmethod
        def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
            # No-op placeholder (NOTE(review): the starred parameters share
            # the obfuscated name `A__`, a SyntaxError as written).
            pass
def __UpperCamelCase ( a : Image ) ->str:
snake_case = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    """Pipeline tests for depth estimation (output contract, batching over
    mixed image inputs, and a slow numeric check against Intel/dpt-large).

    NOTE(review): local names were mangled to `snake_case` throughout, so
    references like `depth_estimator`, `dataset`, `outputs` are unresolved as
    written — confirm against the upstream test module.
    """
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        # Build the pipeline under test plus two sample image paths.
        snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        # Single-image call must return a dict with a depth tensor and image.
        snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
        import datasets
        snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        # Batched call mixing PIL images, URLs and various colour modes.
        snake_case = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , A__ , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass
    @slow
    @require_torch
    def UpperCamelCase ( self ) -> Dict:
        # Numeric regression check against the public Intel/dpt-large weights.
        snake_case = '''Intel/dpt-large'''
        snake_case = pipeline('''depth-estimation''' , model=A__ )
        snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        snake_case = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
    @require_torch
    def UpperCamelCase ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def __UpperCamelCase ( a : float , a : float , a : float ) ->dict[str, float]:
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance == 0:
return {"resistance": sqrt(pow(a , 2 ) - pow(a , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(a , 2 ) - pow(a , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(a , 2 ) + pow(a , 2 ) )}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __UpperCamelCase ( a : Optional[int] ) ->Dict:
snake_case = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(a , a )
def __UpperCamelCase ( a : Optional[Any] ) ->int:
snake_case = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
snake_case = s_dict.pop(a )
elif "subsample" in key:
snake_case = s_dict.pop(a )
def __UpperCamelCase ( a : Optional[int] ) ->Optional[int]:
snake_case , snake_case = emb.weight.shape
snake_case = nn.Linear(a , a , bias=a )
snake_case = emb.weight.data
return lin_layer
def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple:
    """Convert a fairseq Speech2Text checkpoint into a HuggingFace
    SpeechaTextForConditionalGeneration model and save it.

    NOTE(review): the signature declares two parameters both named `a`
    (a SyntaxError) and every assignment rebinds `snake_case`, so the
    references to `mam_aaa`, `state_dict`, `args`, `tie_embeds`, `model`,
    `missing` and `lm_head_weights` are unresolved as written — the original
    names were mangled; confirm against the upstream conversion script.
    """
    # Load the raw fairseq checkpoint onto the CPU.
    snake_case = torch.load(a , map_location='''cpu''' )
    snake_case = mam_aaa['''args''']
    snake_case = mam_aaa['''model''']
    # Keep the output-projection weights before they are stripped below.
    snake_case = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(a )
    rename_keys(a )
    snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    snake_case = args.share_decoder_input_output_embed
    snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )]
    # Mirror the fairseq training arguments into the HF config.
    snake_case = SpeechaTextConfig(
        vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , )
    snake_case = SpeechaTextForConditionalGeneration(a )
    snake_case , snake_case = model.model.load_state_dict(a , strict=a )
    # Only positional-embedding weights may legitimately be missing.
    if len(a ) > 0 and not set(a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""" )
    # Tie or restore the LM head, then save in HF format.
    if tie_embeds:
        snake_case = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case = lm_head_weights
    model.save_pretrained(a )
if __name__ == "__main__":
    # Script entry point: parse the checkpoint path and output folder.
    # NOTE(review): `parser`, `args` and `convert_fairseq_sat_checkpoint_to_tfms`
    # are unresolved as written — the original names were mangled (the parser
    # and args are bound to `_lowercase`, and the conversion function above is
    # named `__UpperCamelCase`).
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
def __UpperCamelCase ( a : int ) ->list:
snake_case = int(a )
if n_element < 1:
snake_case = ValueError('''a should be a positive number''' )
raise my_error
snake_case = [1]
snake_case , snake_case , snake_case = (0, 0, 0)
snake_case = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    # Interactive driver for the Hamming-number generator above.
    # NOTE(review): `hamming`, `n` and `hamming_numbers` are unresolved as
    # written — the function above is named `__UpperCamelCase` and both
    # results are bound to `_lowercase` (original names were mangled).
    _lowercase = input('Enter the last number (nth term) of the Hamming Number Series: ')
    print('Formula of Hamming Number Series => 2^i * 3^j * 5^k')
    _lowercase = hamming(int(n))
    print('-----------------------------------------------------')
    print(f'The list with nth numbers is: {hamming_numbers}')
    print('-----------------------------------------------------')
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=__a ):
_UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']
def __init__( self , *A__ , **A__ ) -> Union[str, Any]:
requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Optional[Any]:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
@classmethod
def UpperCamelCase ( cls , *A__ , **A__ ) -> Any:
requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 44
| 1
|
'''simple docstring'''
from ....utils import logging
_lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
    """Configuration wrapper that adopts every attribute of a base text
    config and adds multimodal settings (MMBT-style).

    Parameters: ``config`` is the wrapped text config whose ``__dict__`` is
    adopted wholesale; ``num_labels`` optionally overrides the label count;
    ``modal_hidden_size`` is the hidden size of the non-text modality.
    """
    # Fixed: the original signature declared three parameters all named
    # `A__` (a SyntaxError) and dropped the assignment targets; names are
    # restored from the body's references (config, num_labels,
    # modal_hidden_size) per the upstream MMBT config.
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ) -> Tuple:
        # Adopt all attributes of the wrapped text config.
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 44
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = value
snake_case = None
snake_case = None
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = tree
def UpperCamelCase ( self , A__ ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( a : float , a : float , a : float , ) ->tuple[str, float]:
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
# Module logger.
_lowercase = logging.get_logger(__name__)
# (prefix, replacement) pairs applied to every checkpoint key — this is the
# `rename_keys_prefix` table used by the key-renaming helper below.
# NOTE(review): it is bound to `_lowercase` here (original name mangled) and
# is immediately shadowed by the next assignment.
_lowercase = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]
# File names of the supported original VisualBERT checkpoints — referenced
# below as ACCEPTABLE_CHECKPOINTS (unresolved as written; name mangled).
_lowercase = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]
def __UpperCamelCase ( a : List[str] ) ->Optional[int]:
snake_case = torch.load(a , map_location='''cpu''' )
return sd
def __UpperCamelCase ( a : Optional[int] , a : Union[str, Any] , a : int=rename_keys_prefix ) ->Tuple:
snake_case = OrderedDict()
snake_case = torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
snake_case = key
for name_pair in rename_keys_prefix:
snake_case = new_key.replace(name_pair[0] , name_pair[1] )
snake_case = d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
snake_case = new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]:
    """Convert an original VisualBERT checkpoint into the matching HF model
    class (pretraining / vqa / nlvr / multichoice, inferred from the file
    name) and save it to the dump folder.

    NOTE(review): the signature declares two parameters both named `a`
    (a SyntaxError) and every assignment rebinds `snake_case`, so
    `checkpoint_path`, `config_params`, `model_type`, `config`, `state_dict`
    and `model` are unresolved as written — names were mangled; confirm
    against the upstream conversion script.
    """
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    # "pre" in the file name marks a pretraining checkpoint; otherwise the
    # task head is selected from vcr/vqa/nlvr markers.
    if "pre" in checkpoint_path:
        snake_case = '''pretraining'''
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
            snake_case = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
            snake_case = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            snake_case = '''vqa'''
        elif "nlvr" in checkpoint_path:
            snake_case = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            snake_case = '''nlvr'''
    snake_case = VisualBertConfig(**a )
    # Load State Dict
    snake_case = load_state_dict(a )
    snake_case = get_new_dict(a , a )
    # Instantiate the HF class matching the detected task head.
    if model_type == "pretraining":
        snake_case = VisualBertForPreTraining(a )
    elif model_type == "vqa":
        snake_case = VisualBertForQuestionAnswering(a )
    elif model_type == "nlvr":
        snake_case = VisualBertForVisualReasoning(a )
    elif model_type == "multichoice":
        snake_case = VisualBertForMultipleChoice(a )
    model.load_state_dict(a )
    # Save Checkpoints
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
if __name__ == "__main__":
    # Script entry point: positional checkpoint path and dump folder.
    # NOTE(review): `parser`, `args` and `convert_visual_bert_checkpoint` are
    # unresolved as written — names were mangled (the conversion function
    # above is named `__UpperCamelCase`).
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( a : list ) ->list:
if len(a ) == 0:
return []
snake_case , snake_case = min(a ), max(a )
snake_case = int(max_value - min_value ) + 1
snake_case = [[] for _ in range(a )]
for i in my_list:
buckets[int(i - min_value )].append(a )
return [v for bucket in buckets for v in sorted(a )]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 44
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( a : Dict , a : Optional[int] , a : Dict , a : Dict ) ->Union[str, Any]:
snake_case = original_name.split('''.''' )[0]
snake_case = key.split('''.''' )
snake_case = int(key_list[key_list.index(a ) - 2] )
snake_case = int(key_list[key_list.index(a ) - 1] )
snake_case = orig_block_num - offset
snake_case = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
return key
def __UpperCamelCase ( a : Tuple ) ->Dict:
snake_case = OrderedDict()
snake_case , snake_case = 0, 0
for key, value in state_dict.items():
if key.startswith('''network''' ):
snake_case = key.replace('''network''' , '''poolformer.encoder''' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('''bias''' ) and "patch_embed" not in key:
patch_emb_offset += 1
snake_case = key[: key.find('''proj''' )]
snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" )
snake_case = key.replace('''proj''' , '''projection''' )
if key.endswith('''bias''' ):
total_embed_found += 1
if "patch_embeddings" in key:
snake_case = '''poolformer.encoder.''' + key
if "mlp.fc1" in key:
snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' )
if "mlp.fc2" in key:
snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' )
if "norm1" in key:
snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' )
if "norm2" in key:
snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' )
if "layer_scale_1" in key:
snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' )
if "layer_scale_2" in key:
snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' )
if "head" in key:
snake_case = key.replace('''head''' , '''classifier''' )
snake_case = value
return new_state_dict
def __UpperCamelCase ( ):
    """Download the standard COCO verification image (two cats on a couch)
    and return it as a PIL image."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    # Fixed: the request arguments were mangled to the undefined name `a`;
    # the URL must be passed and the body streamed so `.raw` is readable.
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]:
    """Convert an original PoolFormer checkpoint to an HF
    PoolFormerForImageClassification model, verify its logits on a test
    image, and save model + image processor.

    NOTE(review): the signature declares three parameters all named `a`
    (a SyntaxError) and every assignment rebinds `snake_case`, so
    `model_name`, `size`, `config`, `idalabel`, `logits`, `expected_shape`
    etc. are unresolved as written — names were mangled; confirm against the
    upstream conversion script.
    """
    snake_case = PoolFormerConfig()
    # set attributes based on model_name
    snake_case = '''huggingface/label-files'''
    snake_case = model_name[-3:]
    snake_case = 1000
    snake_case = '''imagenet-1k-id2label.json'''
    snake_case = (1, 1000)
    # set config attributes
    # ImageNet-1k label mapping fetched from the HF hub dataset repo.
    snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
    snake_case = {int(a ): v for k, v in idalabel.items()}
    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}
    # Per-size architecture hyperparameters (depths, widths, mlp ratio,
    # layer-scale init, crop percentage).
    if size == "s12":
        snake_case = [2, 2, 6, 2]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s24":
        snake_case = [4, 4, 12, 4]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s36":
        snake_case = [6, 6, 18, 6]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.9
    elif size == "m36":
        snake_case = [6, 6, 18, 6]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    elif size == "m48":
        snake_case = [8, 8, 24, 8]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )
    # Prepare image
    snake_case = prepare_img()
    snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    snake_case = torch.load(a , map_location=torch.device('''cpu''' ) )
    # rename keys
    snake_case = rename_keys(a )
    # create HuggingFace model and load state dict
    snake_case = PoolFormerForImageClassification(a )
    model.load_state_dict(a )
    model.eval()
    # Define image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )
    snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    snake_case = model(a )
    snake_case = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        snake_case = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        snake_case = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        snake_case = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , a , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # Script entry point: model name, source checkpoint and output folder.
    # NOTE(review): `parser`, `args` and `convert_poolformer_checkpoint` are
    # unresolved as written — names were mangled (the conversion function
    # above is named `__UpperCamelCase`).
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    _lowercase = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
def __UpperCamelCase ( a : list[int] , a : list[int] , a : int ) ->bool:
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(a ) )
def __UpperCamelCase ( graph: list[list[int]] , max_colors: int , colored_vertices: list[int] , index: int ) ->bool:
    """Backtracking step of graph coloring: try to color vertices from
    ``index`` onward with at most ``max_colors`` colors, mutating
    ``colored_vertices`` in place. Returns True on success.

    NOTE(review): the body calls `valid_coloring` and `util_color`, the
    upstream names of the sibling functions in this file (here both renamed
    to `__UpperCamelCase`) — confirm the linkage.
    """
    # Fixed: the signature declared four parameters all named `a`
    # (a SyntaxError); names restored from the body's own references.
    # Base Case
    if index == len(graph ):
        return True
    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] , colored_vertices , i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph , max_colors , colored_vertices , index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False
def __UpperCamelCase ( graph: list[list[int]] , max_colors: int ) ->list[int]:
    """Color ``graph`` (adjacency matrix) with at most ``max_colors`` colors.

    Returns the list of per-vertex colors, or an empty list when no valid
    coloring exists.

    NOTE(review): delegates to `util_color`, the upstream name of the
    backtracking helper above (renamed to `__UpperCamelCase` in this file).
    """
    # Fixed: the signature declared two parameters both named `a`
    # (a SyntaxError); names restored from the body's own references.
    colored_vertices = [-1] * len(graph )
    if util_color(graph , max_colors , colored_vertices , 0 ):
        return colored_vertices
    return []
| 44
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
_lowercase = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'text-classification',
'language-modeling',
'summarization',
'token-classification',
'question-answering',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
_lowercase = logging.getLogger()
def __UpperCamelCase ( ):
    """Return the value of the ``-f`` command-line flag (the path pytest
    passes to the test file), read from ``sys.argv``."""
    # Fixed: the parser was assigned to a mangled name, leaving `parser`
    # and `args` undefined in the following lines.
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def __UpperCamelCase ( a : Dict , a : Tuple="eval" ) ->List[Any]:
snake_case = os.path.join(a , f"""{split}_results.json""" )
if os.path.exists(a ):
with open(a , '''r''' ) as f:
return json.load(a )
raise ValueError(f"""can't find {path}""" )
_lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase ( __a ):
    """End-to-end smoke tests for the Flax example scripts: each test builds
    a CLI argument list, patches ``sys.argv``, runs the example's ``main()``
    on tiny fixtures, and checks the metrics written to ``*_results.json``.

    NOTE(review): local names were mangled to `snake_case` and the
    ``patch.object``/``get_results`` arguments to `A__`, so references like
    `tmp_dir` and `result` are unresolved as written — confirm against the
    upstream test_flax_examples module.
    """
    def UpperCamelCase ( self ) -> List[str]:
        # Text classification (GLUE/MRPC) with run_flax_glue.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_glue.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
    @slow
    def UpperCamelCase ( self ) -> List[Any]:
        # Causal language modeling with run_clm_flax.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_clm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 1_00 )
    @slow
    def UpperCamelCase ( self ) -> int:
        # Summarization (XSum sample) with run_summarization_flax.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_summarization_flax.main()
            snake_case = get_results(A__ , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
    @slow
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Masked language modeling with run_mlm_flax.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 42 )
    @slow
    def UpperCamelCase ( self ) -> Dict:
        # T5 span-corruption pretraining with run_t5_mlm_flax.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_ta_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
    @slow
    def UpperCamelCase ( self ) -> int:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        snake_case = 7 if get_gpu_count() > 1 else 2
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_ner.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
    @slow
    def UpperCamelCase ( self ) -> Any:
        # Extractive question answering (SQuAD v2 sample) with run_qa.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_qa.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 44
| 1
|
'''simple docstring'''
def __UpperCamelCase ( a : Optional[int] ) ->str:
snake_case = len(a )
snake_case = sum(a )
snake_case = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
snake_case = True
for i in range(1 , s + 1 ):
snake_case = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
snake_case = dp[i][j - 1]
if arr[i - 1] <= j:
snake_case = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
snake_case = s - 2 * j
break
return diff
| 44
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# Module logger.
_lowercase = logging.get_logger(__name__)
# Maps the --lr_scheduler CLI choice to its schedule factory. Referenced
# below as `arg_to_scheduler`.
# NOTE(review): it is bound to `_lowercase` here (original name mangled), so
# the later lookup by `arg_to_scheduler[...]` is unresolved as written.
_lowercase = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _lowercase ( __a ):
def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]:
super().__init__(*A__ , **A__ )
if config is None:
assert isinstance(self.model , A__ ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
snake_case = self.model.config
else:
snake_case = config
snake_case = data_args
snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
''' padding..''' )
if self.args.label_smoothing == 0:
snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
snake_case = label_smoothed_nll_loss
def UpperCamelCase ( self , A__ ) -> Tuple:
if self.optimizer is None:
snake_case = ['''bias''', '''LayerNorm.weight''']
snake_case = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
snake_case = Adafactor if self.args.adafactor else AdamW
if self.args.adafactor:
snake_case = Adafactor
snake_case = {'''scale_parameter''': False, '''relative_step''': False}
else:
snake_case = AdamW
snake_case = {
'''betas''': (self.args.adam_betaa, self.args.adam_betaa),
'''eps''': self.args.adam_epsilon,
}
snake_case = self.args.learning_rate
if self.sharded_ddp:
snake_case = OSS(
params=A__ , optim=A__ , **A__ , )
else:
snake_case = optimizer_cls(A__ , **A__ )
if self.lr_scheduler is None:
snake_case = self._get_lr_scheduler(A__ )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def UpperCamelCase ( self , A__ ) -> Tuple:
snake_case = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
snake_case = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
snake_case = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ )
return scheduler
def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
snake_case = model(**A__ , use_cache=A__ )[0]
snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2]
else:
# compute label smoothed loss
snake_case = model(**A__ , use_cache=A__ )[0]
snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 )
snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def UpperCamelCase ( self , A__ , A__ ) -> Any:
snake_case = inputs.pop('''labels''' )
snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
return loss
def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
snake_case = self._prepare_inputs(A__ )
snake_case = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
snake_case = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
snake_case = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
snake_case = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
snake_case = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def UpperCamelCase ( self , A__ , A__ ) -> List[str]:
# If PAD token is not defined at least EOS token has to be defined
snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
F""" padded to `max_length`={max_length}""" )
snake_case = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
snake_case = tensor
return padded_tensor
| 44
| 1
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class _lowercase:
    """Directed, weighted graph stored as ``{node: [[weight, neighbor], ...]}``.

    Restored from mangled source: every assignment targeted the literal name
    ``snake_case`` and every method was named ``UpperCamelCase`` (so later defs
    clobbered earlier ones and the class could not even parse due to duplicate
    ``A__`` parameters).  Method names follow the bodies' own internal call
    sites (``self.add_pair``, ``self.dfs``, ``self.bfs``).
    """

    def __init__(self):
        # adjacency list: node -> list of [weight, neighbor] edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add a directed edge ``u -> v`` with weight ``w`` (deduplicated)."""
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            # make sure the target node exists even with no outgoing edges
            self.graph[v] = []

    def all_nodes(self):
        """Return every node in insertion order."""
        return list(self.graph)

    def remove_pair(self, u, v):
        """Remove the edge ``u -> v`` if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from ``s`` (first node when -2); stops early when ``d`` is reached."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with random edges; ``c`` caps the node-id range."""
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal starting at ``s`` (first node when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        """Count edges pointing into ``u``."""
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        """Count edges leaving ``u``."""
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        """DFS-based ordering (meaningful only on acyclic graphs)."""
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # a node is emitted once all its children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        """Return nodes that participate in a cycle (DFS with back-edge detection)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    # a visited non-parent reachable again while not backtracking
                    # indicates a back edge
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock duration of a DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock duration of a BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class _lowercase:
    """Undirected, weighted graph stored as ``{node: [[weight, neighbor], ...]}``.

    Restored from mangled source (same treatment as the directed class above:
    ``snake_case`` assignment targets, duplicate ``A__`` parameters, colliding
    ``UpperCamelCase`` method names).  Each edge is stored in both directions.
    """

    def __init__(self):
        # adjacency list: node -> list of [weight, neighbor] edges
        self.graph = {}

    def add_pair(self, u, v, w=1):
        """Add the undirected edge ``u -- v`` with weight ``w`` (deduplicated)."""
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        """Remove the undirected edge ``u -- v`` if present."""
        if self.graph.get(u):
            for edge in self.graph[u]:
                if edge[1] == v:
                    self.graph[u].remove(edge)
        # the other way round
        if self.graph.get(v):
            for edge in self.graph[v]:
                if edge[1] == u:
                    self.graph[v].remove(edge)

    def dfs(self, s=-2, d=-1):
        """Iterative DFS from ``s`` (first node when -2); stops early at ``d``."""
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        """Populate the graph with random edges; ``c`` caps the node-id range."""
        if c == -1:
            c = floor(random() * 10_000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        """Breadth-first traversal starting at ``s`` (first node when -2)."""
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        """Number of edges incident to ``u``."""
        return len(self.graph[u])

    def cycle_nodes(self):
        """Return nodes that participate in a cycle (DFS with back-edge detection)."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        """Return True as soon as a cycle is detected, else False."""
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
            indirect_parents.append(parent)
            parent = s
            s = ss
            if len(stack) == 0:
                return False

    def all_nodes(self):
        """Return every node in insertion order."""
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        """Wall-clock duration of a DFS run."""
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        """Wall-clock duration of a BFS run."""
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
| 44
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def _hash_python_lines(lines: List[str]) -> str:
    """Hash the code-bearing content of ``lines``: strip ``#`` comments, drop
    empty lines, join with newlines and return the sha256 hex digest.

    Restored from mangled source: the loop iterated an undefined name, the
    whole input was appended instead of the filtered line, and the registry
    below calls ``_hash_python_lines``.
    """
    # local import: the module-level `from hashlib import shaaaa` is broken
    from hashlib import sha256

    filtered_lines = []
    for line in lines:
        line = re.sub(r"#.*", "", line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = "\n".join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode("utf-8")
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
# Registry of packaged dataset modules: name -> (module __name__, source hash).
_lowercase = {
    "csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    "json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    "pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    "parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    "arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    "text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    "imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    "audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions.
# Restored name: the update/iteration statements below reference
# `_EXTENSION_TO_MODULE`, which was never defined in the mangled source.
_EXTENSION_TO_MODULE = {
    ".csv": ("csv", {}),
    ".tsv": ("csv", {"sep": "\t"}),
    ".json": ("json", {}),
    ".jsonl": ("json", {}),
    ".parquet": ("parquet", {}),
    ".arrow": ("arrow", {}),
    ".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name.
# Restored name: the loop and the two `.append('.zip')` lines reference
# `_MODULE_TO_EXTENSIONS`, which was never defined in the mangled source.
_MODULE_TO_EXTENSIONS = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 44
| 1
|
'''simple docstring'''
import doctest
from collections import deque
import numpy as np
class _lowercase:
    """Circular convolution of two 1-D signals via the circulant-matrix method."""

    def __init__(self) -> None:
        # default example signals; their circular convolution is [10, 10, 6, 14]
        # (restored: the mangled source assigned these to a local instead of self)
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def UpperCamelCase(self) -> list[float]:
        """Return the circular convolution of the two signals, rounded to 2 dp."""
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)
        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for _ in range(max_length)]
        # fill the smaller signal with zeros to make both signals the same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)
        # row i of the circulant matrix is the second signal rotated right by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item
        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))
        # rounding-off to two decimal places
        return [round(value, 2) for value in final_signal]


if __name__ == "__main__":
    doctest.testmod()
| 44
|
'''simple docstring'''
# Pinned dependency table: package name -> pip requirement specifier.
# NOTE(review): the variable name `_lowercase` looks machine-mangled; this
# table resembles diffusers' setup.py-generated `deps` dict — confirm upstream.
_lowercase = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def __UpperCamelCase(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Value of the Gaussian PDF with mean ``mu`` and std-dev ``sigma`` at ``x``.

    Restored from mangled source: all three parameters were named ``a``
    (a SyntaxError — duplicate argument) and the return annotation said
    ``int`` for a real-valued density.
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 44
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline``.

    Restored from mangled source: the duplicate ``__a`` base classes would
    raise ``TypeError`` at class creation (bases restored to the two imported
    mixins), the four identical ``_UpperCAmelCase`` attributes clobbered each
    other, and a method had two parameters both named ``A__`` (SyntaxError).
    Attribute/method names follow the mixin conventions — confirm against
    PipelineTesterMixin before merging.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        """Delegate to the IF mixin's super-resolution component factory."""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic dummy pipeline inputs on ``device``."""
        if str(device).startswith("mps"):
            # MPS does not support device-local generators
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 44
| 1
|
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    """Write a well-formed two-column CSV and return its path.

    Restored from mangled source: the parameter was renamed away from
    ``tmp_path`` (breaking pytest injection — the body references
    ``tmp_path``) and ``open()`` was called on the directory, not the file.
    The fixture name matches the test bodies' references to ``csv_file``.
    """
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    """Write a CSV whose last row has a trailing extra column, and return its path."""
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    """Write a one-column CSV whose single row is the path of ``image_file``.

    Restored from mangled source: both parameters were named ``a``
    (SyntaxError); the body interpolates ``image_file`` and writes under
    ``tmp_path``, which fixes the parameter names.
    """
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    """Write a one-column CSV of class-label strings and return its path."""
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    """Write a one-column CSV of space-separated integer lists and return its path."""
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    """The Csv builder must raise on an unparsable file and log which file failed.

    Restored from mangled source: three parameters all named ``a``
    (SyntaxError); the body references ``csv_file``/``malformed_csv_file``
    fixtures by name, and ``pytest.raises`` needs a real exception type
    (pandas' ParserError is a ``ValueError``).
    """
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    """A CSV column declared as Image() must be decoded into image structs."""
    with open(csv_file_with_image, encoding="utf-8") as f:
        # second line of the fixture CSV is the image path
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    """A CSV column declared as ClassLabel must be converted to integer ids.

    Fixes the mangled final assertion which called the non-existent
    ``.straint(a)`` — it should be ``.str2int(label)`` over the file's labels.
    """
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    """A converter may turn a string cell into a list of ints.

    Fixes the mangled converter ``lambda a: [int(a) for i in x.split()]``
    (it split an undefined ``x`` and converted the whole cell each time).
    """
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
| 44
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase(FeatureExtractionMixin):
    """Base class for sequence feature extractors: pads (and optionally
    truncates) batches of variable-length sequences to a common length.

    Restored from mangled source: ``__init__`` and ``pad`` had duplicate
    ``A__`` parameters (SyntaxError), every local was assigned to the literal
    ``snake_case``, the base class was the undefined ``__a`` (the file imports
    ``FeatureExtractionMixin``), and the helper defs were named
    ``UpperCamelCase`` while ``pad`` calls ``self._pad`` / ``self._truncate``
    / ``self._get_padding_strategies``.
    """

    def __init__(self, feature_size, sampling_rate, padding_value, **kwargs):
        """Store the core padding attributes; remaining kwargs go to the mixin."""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        # "right" pads/truncates at the end of each sequence, "left" at the start
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)
        super().__init__(**kwargs)

    def pad(
        self,
        processed_features,
        padding=True,
        max_length=None,
        truncation=False,
        pad_to_multiple_of=None,
        return_attention_mask=None,
        return_tensors=None,
    ) -> BatchFeature:
        """Pad a batch (dict of lists, or list of dicts) to a common length.

        Returns a ``BatchFeature`` optionally converted to ``return_tensors``.
        """
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )
        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")
        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs, max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)
        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        """Pad a single example dict (in place) according to ``padding_strategy``."""
        required_input = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)
        if needs_to_be_padded:
            difference = max_length - len(required_input)
            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return processed_features

    def _truncate(self, processed_features, max_length=None, pad_to_multiple_of=None, truncation=None):
        """Truncate a single example dict (in place) to at most ``max_length``."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")
        required_input = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]
        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        """Normalize the user-facing ``padding`` argument into a PaddingStrategy."""
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )
        return padding_strategy
| 44
| 1
|
'''simple docstring'''
# Public feature types re-exported by this package.
# NOTE(review): this list is only ever assigned to `_lowercase` here — it looks
# like a mangled `__all__`; as written it has no effect on `import *`. Confirm
# against the upstream package before relying on it.
_lowercase = [
    'Audio',
    'Array2D',
    'Array3D',
    'Array4D',
    'Array5D',
    'ClassLabel',
    'Features',
    'Sequence',
    'Value',
    'Image',
    'Translation',
    'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 44
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase(yaml.SafeLoader):
    """A SafeLoader that rejects mappings containing duplicate keys.

    Restored from mangled source: ``construct_mapping`` had two parameters
    named ``A__`` (SyntaxError), locals were assigned to the literal
    ``snake_case``, and the defs were renamed to ``UpperCamelCase`` — which
    both broke the ``self._check_no_duplicates_on_constructed_node`` call and
    silently disabled the ``construct_mapping`` override.
    """

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError if the mapping node contains a duplicated key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # lists are unhashable; normalize them to tuples before counting
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def __UpperCamelCase ( readme_content: str ) -> Tuple[Optional[str], str]:
    """Split README text into ``(yaml_block, rest)``.

    The YAML block is the text between a leading ``---`` line and the next
    ``---`` line; returns ``(None, full_text)`` when there is no such block.

    Fix: the original declared the parameter ``a`` while the body read
    ``readme_content`` (NameError), and the fallback joined the characters of
    the raw string with newlines instead of rejoining the split lines.
    """
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index('''---''' ) + 1
        yamlblock = '''\n'''.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )

# Repair the name used by the call sites later in this file.
_split_yaml_from_readme = __UpperCamelCase
class _lowercase ( dict ):
    """Dict-like holder for the YAML metadata block at the top of a dataset README.

    NOTE(review): the original block was heavily name-mangled — base class
    ``__a`` (undefined), every method named ``UpperCamelCase`` (so later defs
    shadowed earlier ones), and the class attribute bound to ``_UpperCAmelCase``
    while the bodies read ``_FIELDS_WITH_DASHES``. Names here are restored from
    the call sites visible in this file (``cls.from_yaml_string``,
    ``self._to_readme``, ``self.to_yaml_string``, ``self.items()``,
    ``DatasetMetadata.from_readme``/``to_readme`` in the ``__main__`` block);
    the ``dict`` base is inferred from ``self.items()`` and ``cls(**...)`` —
    confirm against upstream.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "DatasetMetadata":
        """Load metadata from the leading YAML block of the README at ``path``."""
        with open(path, encoding='''utf-8''' ) as readme_file:
            # NOTE(review): `_split_yaml_from_readme` must be exported by this
            # module — the mangled original bound it to `__UpperCamelCase`.
            yaml_string, _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write this metadata back into the README at ``path`` (created if missing)."""
        if path.exists():
            with open(path, encoding='''utf-8''' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, '''w''', encoding='''utf-8''' ) as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None) -> str:
        """Return README text with this metadata serialized as the leading YAML block."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
        else:
            full_content = '''---\n''' + self.to_yaml_string() + '''---\n'''
        return full_content

    @classmethod
    def from_yaml_string(cls, string) -> "DatasetMetadata":
        """Parse a YAML string, normalizing dashed keys for the known fields."""
        # NOTE(review): `_NoDuplicateSafeLoader` is the duplicate-rejecting
        # SafeLoader defined above (mangled to `_lowercase`) — confirm binding.
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('''-''', '''_''' ) if key.replace('''-''', '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, restoring dashes in the known dashed fields."""
        # NOTE(review): keyword values restored from upstream convention
        # (sort_keys=False, allow_unicode=True) — the mangled original passed `A__`.
        return yaml.safe_dump(
            {
                (key.replace('''_''', '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding='''utf-8''',
        ).decode('''utf-8''' )
# Map of task-category name -> (initially empty) list of registered entries.
# Built with a comprehension so every key gets its own fresh list.
_lowercase = {
    task_category: []
    for task_category in (
        'image-classification',
        'translation',
        'image-segmentation',
        'fill-mask',
        'automatic-speech-recognition',
        'token-classification',
        'sentence-similarity',
        'audio-classification',
        'question-answering',
        'summarization',
        'zero-shot-classification',
        'table-to-text',
        'feature-extraction',
        'other',
        'multiple-choice',
        'text-classification',
        'text-to-image',
        'text2text-generation',
        'zero-shot-image-classification',
        'tabular-classification',
        'tabular-regression',
        'image-to-image',
        'tabular-to-text',
        'unconditional-image-generation',
        'text-retrieval',
        'text-to-speech',
        'object-detection',
        'audio-to-audio',
        'text-generation',
        'conversational',
        'table-question-answering',
        'visual-question-answering',
        'image-to-text',
        'reinforcement-learning',
        'voice-activity-detection',
        'time-series-forecasting',
        'document-question-answering',
    )
}
if __name__ == "__main__":
from argparse import ArgumentParser
_lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
ap.add_argument('readme_filepath')
_lowercase = ap.parse_args()
_lowercase = Path(args.readme_filepath)
_lowercase = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 44
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __UpperCamelCase ( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch state-dict file.

    Fix: the original declared all three parameters as ``a`` (a SyntaxError)
    and bound locals to ``snake_case`` while later lines read ``config`` and
    ``model``; parameter names are restored from the positional call in the
    ``__main__`` block below.
    """
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""" )
    model = LxmertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict(), pytorch_dump_path)

# Public name used by the __main__ block below.
convert_tf_checkpoint_to_pytorch = __UpperCamelCase
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 44
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    """Tokenizer test-suite for CodeGen (slow and fast tokenizers).

    NOTE(review): this block is name-mangled — the repeated ``_UpperCAmelCase``
    class attributes shadow each other, every test method is named
    ``UpperCamelCase`` (later defs shadow earlier ones, so unittest will not
    discover most of them), locals are bound to ``snake_case`` while later
    lines read the intended names, and several statements reference the
    undefined name ``A__``. Code is left byte-identical; confirm the intended
    names against the upstream transformers test file.
    """
    _UpperCAmelCase = CodeGenTokenizer
    _UpperCAmelCase = CodeGenTokenizerFast
    _UpperCAmelCase = True
    _UpperCAmelCase = {'''add_prefix_space''': True}
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> Tuple:
        """setUp: write a tiny BPE vocab/merges fixture into the temp dir."""
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        """Factory for the slow tokenizer built from the fixture files."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        """Factory for the fast tokenizer built from the fixture files."""
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , A__ ) -> Tuple:
        """Return an (input_text, output_text) pair for round-trip checks."""
        snake_case = '''lower newer'''
        snake_case = '''lower newer'''
        return input_text, output_text
    def UpperCamelCase ( self ) -> List[Any]:
        """Tokenization and token→id conversion against the tiny fixture vocab."""
        snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case = '''lower newer'''
        snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        self.assertListEqual(A__ , A__ )
        snake_case = tokens + [tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        """Slow vs rust tokenizer parity: tokens, ids, special tokens, unk."""
        if not self.test_rust_tokenizer:
            return
        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = '''lower newer'''
        # Testing tokenization
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids without special tokens
        snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids with special tokens
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing the unknown token
        snake_case = tokens + [rust_tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def UpperCamelCase ( self , A__=15 ) -> Tuple:
        """Padding without a pad token must raise for all encode entry points."""
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                # Simple input
                snake_case = '''This is a simple input'''
                snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case = ('''This is a simple input''', '''This is a pair''')
                snake_case = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
    def UpperCamelCase ( self ) -> Tuple:
        """Padding behavior with an explicit <pad> token: shapes and masks."""
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
        snake_case = ('''This is a simple input''', '''This is a pair''')
        snake_case = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        snake_case = tokenizer.pad_token_id
        snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def UpperCamelCase ( self ) -> str:
        """A custom bos token must be prepended to ids and survive decoding."""
        snake_case = '''$$$'''
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
        snake_case = tokenizer.bos_token_id
        snake_case = tokenizer(A__ )
        snake_case = tokenizer(A__ )
        self.assertEqual(out_s.input_ids[0] , A__ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        snake_case = tokenizer.decode(out_s.input_ids )
        snake_case = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A__ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def UpperCamelCase ( self ) -> Any:
        """Decoding with truncate_before_pattern against the real checkpoint."""
        snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        snake_case = '''\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'''
        snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
        snake_case = tokenizer.encode(A__ )
        snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally skipped in the original; kept as a no-op.
        pass
| 44
| 1
|
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
# NOTE(review): `os` is imported above but unused, and this bare '3' literal is
# dead; it looks like a mangled `os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"`
# (silence TensorFlow logging) — confirm against the upstream script.
_lowercase = '3'
# Report interpreter and core library versions; each optional framework is
# probed with try/except so the script works in any environment.
print('Python version:', sys.version)
print('transformers version:', transformers.__version__)
try:
    import torch
    print('Torch version:', torch.__version__)
    print('Cuda available:', torch.cuda.is_available())
    print('Cuda version:', torch.version.cuda)
    print('CuDNN version:', torch.backends.cudnn.version())
    print('Number of GPUs available:', torch.cuda.device_count())
    print('NCCL version:', torch.cuda.nccl.version())
except ImportError:
    print('Torch version:', None)
try:
    import deepspeed
    print('DeepSpeed version:', deepspeed.__version__)
except ImportError:
    print('DeepSpeed version:', None)
try:
    import tensorflow as tf
    print('TensorFlow version:', tf.__version__)
    print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU')))
    print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU')))
except ImportError:
    print('TensorFlow version:', None)
| 44
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Config/input factory used by the TF ViT model tests below.

    NOTE(review): restored from a mangled original in which every ``__init__``
    parameter was named ``A__`` (a SyntaxError), locals were all bound to
    ``snake_case`` while later lines read the intended names, and every method
    was named ``UpperCamelCase``. Method names are taken from the call sites in
    the test class below (``prepare_config_and_inputs``, ``get_config``,
    ``create_and_check_model``, ``create_and_check_for_image_classification``,
    ``prepare_config_and_inputs_for_common``) — confirm against upstream.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,  # NOTE(review): present in the mangled signature (=3) but never stored — confirm
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,  # NOTE(review): mangled original passed `A__` — confirm
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        # NOTE(review): mangled original bound this to a local; classification
        # head size must come from the config — confirm.
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    """TF ViT model test-suite (common model tests + config tests).

    NOTE(review): names appear mangled — the repeated ``_UpperCAmelCase`` class
    attributes shadow each other, every test method is named ``UpperCamelCase``
    (later defs shadow earlier ones, so unittest will not discover most of
    them), and locals are bound to ``snake_case`` while later lines read the
    intended names. Code left byte-identical; confirm names upstream.
    """
    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> List[Any]:
        """setUp: build the shared model-tester and config-tester fixtures."""
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def UpperCamelCase ( self ) -> int:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        pass
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Every model exposes a keras-layer input embedding and no output embedding.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
    def UpperCamelCase ( self ) -> List[Any]:
        # The forward signature of every model must start with `pixel_values`.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    @slow
    def UpperCamelCase ( self ) -> Any:
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) -> Any:
    """Load the COCO cats fixture image used by the integration tests below.

    Fix: the original bound the opened image to ``snake_case`` but returned the
    undefined name ``image``.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image

# Name used by the integration test class below.
prepare_img = __UpperCamelCase
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: logits of google/vit-base-patch16-224 on the COCO image.

    NOTE(review): locals are mangled to ``snake_case`` while later lines read
    ``outputs`` etc., and both methods share the name ``UpperCamelCase`` —
    confirm the intended fixture/test names upstream.
    """
    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        # Image-processor fixture; None when vision deps are unavailable.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def UpperCamelCase ( self ) -> Dict:
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 1
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Conditional imports: torch and PIL are only pulled in when available.
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Stand-in so later references don't fail when vision deps are absent.
    # NOTE(review): `*A__ , **A__` duplicates the parameter name (SyntaxError
    # as written) — mangling artifact, left byte-identical.
    class _lowercase :
        @staticmethod
        def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
            """No-op placeholder (vision unavailable)."""
            pass
def __UpperCamelCase ( a ) -> str:
    """Return the hex MD5 digest of an image's raw bytes (stable fingerprint).

    Fix: the original called the nonexistent ``hashlib.mda`` (md5 intended) and
    bound the hasher to ``snake_case`` while returning the undefined name ``m``.
    Works for any object exposing ``tobytes()`` (e.g. a PIL ``Image``).
    """
    m = hashlib.md5(a.tobytes() )
    return m.hexdigest()

# Name used by the tests below.
hashimage = __UpperCamelCase
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    """Pipeline tests for depth estimation.

    NOTE(review): method names are mangled to ``UpperCamelCase`` (later defs
    shadow earlier ones, so unittest/pipeline-mixin discovery will not find
    them) and locals are bound to ``snake_case`` while later lines read the
    intended names. Code left byte-identical; confirm names upstream.
    """
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        # Pipeline factory: returns the pipeline plus example image paths.
        snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        # Single image then a batch covering RGBA/LA/L modes and a URL.
        snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
        import datasets
        snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        snake_case = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , A__ , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass
    @slow
    @require_torch
    def UpperCamelCase ( self ) -> Dict:
        # End-to-end run against Intel/dpt-large with known min/max depths.
        snake_case = '''Intel/dpt-large'''
        snake_case = pipeline('''depth-estimation''' , model=A__ )
        snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        snake_case = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
    @require_torch
    def UpperCamelCase ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 44
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Same (dataset, config_name) pairs as before, built from compact tuples so the
# list literal stays readable; dict key order ('dataset' then 'config_name')
# and list order are preserved exactly.
_lowercase = [
    {'dataset': dataset_name, 'config_name': config_name}
    for dataset_name, config_name in (
        ('wikipedia', '20220301.de'),
        ('wikipedia', '20220301.en'),
        ('wikipedia', '20220301.fr'),
        ('wikipedia', '20220301.frr'),
        ('wikipedia', '20220301.it'),
        ('wikipedia', '20220301.simple'),
        ('snli', 'plain_text'),
        ('eli5', 'LFQA_reddit'),
        ('wiki40b', 'en'),
        ('wiki_dpr', 'psgs_w100.nq.compressed'),
        ('wiki_dpr', 'psgs_w100.nq.no_index'),
        ('wiki_dpr', 'psgs_w100.multiset.no_index'),
        ('natural_questions', 'default'),
    )
]
def __UpperCamelCase ( with_config=True ):
    """Build ``parameterized.named_parameters`` entries for the GCP datasets.

    Fix: the original declared the parameter ``a`` while the body read
    ``with_config`` (NameError); the keyword call in the decorator below
    confirms the intended name.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            # NOTE(review): the module-level list above is bound to `_lowercase`,
            # so DATASETS_ON_HF_GCP may be undefined as written — confirm export.
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]

# Name expected by the @parameterized decorator below.
list_datasets_on_hf_gcp_parameters = __UpperCamelCase
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) )
class _lowercase ( __a ):
    """Checks that each dataset's info file is reachable on the HF GCP mirror.

    NOTE(review): mangled — the two ``_UpperCAmelCase`` attributes shadow each
    other, the test method has two parameters both named ``A__`` (a SyntaxError
    as written), and locals are ``snake_case`` while the URL join reads
    ``builder_instance``/``dataset_module``. Left byte-identical.
    """
    _UpperCAmelCase = None
    _UpperCAmelCase = None
    def UpperCamelCase ( self , A__ , A__ ) -> str:
        # Build the dataset builder, derive its GCP info URL, and download it.
        with TemporaryDirectory() as tmp_dir:
            snake_case = dataset_module_factory(A__ , cache_dir=A__ )
            snake_case = import_main_class(dataset_module.module_path , dataset=A__ )
            snake_case = builder_cls(
                cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , )
            snake_case = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            snake_case = cached_path(A__ , cache_dir=A__ )
            self.assertTrue(os.path.exists(A__ ) )
@pytest.mark.integration
def __UpperCamelCase ( tmp_path_factory ):
    """Integration test: build wikipedia/20220301.frr from the HF GCP mirror.

    Fix: the original declared the parameter ``a`` while the body read the
    pytest fixture name ``tmp_path_factory``, and bound every local to
    ``snake_case`` while later lines read the intended names; names below are
    restored from those uses.
    """
    cache_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=cache_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=cache_dir , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    # NOTE(review): the mangled original had a bare `snake_case = None` here;
    # presumably this disables the beam preparation path — confirm upstream.
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __UpperCamelCase ( a ):
    """Integration test: stream wikipedia/20220301.frr and read one example.

    Fix: the original bound every local to ``snake_case`` while later lines
    read ``dataset_module``/``builder_cls``/``ds``, and the two ``isinstance``
    checks compared ``a`` against itself; the streaming-dict types come from
    this file's imports.
    """
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=a )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''] , IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 44
| 1
|
'''simple docstring'''
# Package init: expose the spectrogram-diffusion pipeline pieces, substituting
# dummy objects when optional dependencies (torch/transformers/note_seq) are
# missing. The try/raise/except dance is the standard diffusers lazy-dep guard.
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        # NOTE(review): SpectrogramContEncoder is re-imported here, shadowing
        # the import two lines above — confirm this mirrors upstream.
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )
try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
| 44
|
'''simple docstring'''
def __UpperCamelCase ( a: int , b: int ) -> int:
    """Iterative Euclidean GCD of two non-negative integers.

    Fix: the original declared both parameters as ``a`` (a SyntaxError) and the
    loop bound ``snake_case, snake_case = b, a % b``, never updating ``a``/``b``
    (an infinite loop for any nonzero ``b``).
    """
    while b:
        a, b = b, a % b
    return a

# Bind the name used by the demo below.
euclidean_gcd = __UpperCamelCase
def __UpperCamelCase ( a : int , a : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(a , a % b )
def __UpperCamelCase ( ) ->Optional[Any]:
    """Demo driver printing sample GCD computations.

    NOTE(review): this reads ``euclidean_gcd`` / ``euclidean_gcd_recursive``,
    but the two functions above are bound to the mangled name
    ``__UpperCamelCase``, so this raises NameError as written — confirm the
    intended module-level names.
    """
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 44
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = LEDTokenizerFast
_UpperCAmelCase = True
    def UpperCamelCase ( self ) -> int:
        """setUp: write a tiny BPE vocab/merges fixture into the temp dir.

        NOTE(review): locals are mangled to ``snake_case`` while later lines
        read ``self.vocab_file``/``self.merges_file``, and several statements
        reference the undefined name ``A__``. Left byte-identical.
        """
        super().setUp()
        snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
        ]
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )
def UpperCamelCase ( self , **A__ ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , **A__ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A__ )
def UpperCamelCase ( self , A__ ) -> Dict:
return "lower newer", "lower newer"
@cached_property
def UpperCamelCase ( self ) -> str:
return LEDTokenizer.from_pretrained('''allenai/led-base-16384''' )
@cached_property
def UpperCamelCase ( self ) -> Optional[int]:
return LEDTokenizerFast.from_pretrained('''allenai/led-base-16384''' )
@require_torch
def UpperCamelCase ( self ) -> str:
snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
snake_case = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(A__ , max_length=len(A__ ) , padding=A__ , return_tensors='''pt''' )
self.assertIsInstance(A__ , A__ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
snake_case = batch.input_ids.tolist()[0]
self.assertListEqual(A__ , A__ )
@require_torch
def UpperCamelCase ( self ) -> Any:
snake_case = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(A__ , padding=A__ , return_tensors='''pt''' )
self.assertIn('''input_ids''' , A__ )
self.assertIn('''attention_mask''' , A__ )
self.assertNotIn('''labels''' , A__ )
self.assertNotIn('''decoder_attention_mask''' , A__ )
@require_torch
def UpperCamelCase ( self ) -> Optional[Any]:
snake_case = [
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(text_target=A__ , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def UpperCamelCase ( self ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(
['''I am a small frog''' * 10_24, '''I am a small frog'''] , padding=A__ , truncation=A__ , return_tensors='''pt''' )
self.assertIsInstance(A__ , A__ )
self.assertEqual(batch.input_ids.shape , (2, 51_22) )
@require_torch
def UpperCamelCase ( self ) -> Union[str, Any]:
snake_case = ['''A long paragraph for summarization.''']
snake_case = [
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = tokenizer(A__ , return_tensors='''pt''' )
snake_case = tokenizer(text_target=A__ , return_tensors='''pt''' )
snake_case = inputs['''input_ids''']
snake_case = targets['''input_ids''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def UpperCamelCase ( self ) -> int:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
snake_case = ['''Summary of the text.''', '''Another summary.''']
snake_case = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
snake_case = tokenizer(A__ , padding=A__ )
snake_case = [[0] * len(A__ ) for x in encoded_output['''input_ids''']]
snake_case = tokenizer.pad(A__ )
self.assertSequenceEqual(outputs['''global_attention_mask'''] , A__ )
def UpperCamelCase ( self ) -> Tuple:
pass
def UpperCamelCase ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
snake_case = self.tokenizer_class.from_pretrained(A__ , **A__ )
snake_case = '''A, <mask> AllenNLP sentence.'''
snake_case = tokenizer_r.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
snake_case = tokenizer_p.encode_plus(A__ , add_special_tokens=A__ , return_token_type_ids=A__ )
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
snake_case = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
snake_case = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
A__ , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
| 44
|
'''simple docstring'''
import argparse
import copy
def __UpperCamelCase ( path ) ->dict:
    """Parse an edge-list file into an adjacency dict.

    Each line of the file is ``node_a node_b distance``.  Returns a dict mapping
    each node name to a list of ``[neighbour, distance]`` pairs (both stored as
    the original strings), with every edge recorded in both directions.

    Fix: the obfuscation rewrote every assignment target to ``snake_case``, so the
    names read below (``dict_of_neighbours``, ``_list``) were never bound and the
    function raised NameError; bindings are restored from those reads.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            # Register the edge under its first endpoint.
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]])
            # And mirror it under the second endpoint.
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]])
    return dict_of_neighbours
def __UpperCamelCase ( path , dict_of_neighbours ) ->tuple:
    """Build a greedy nearest-neighbour starting tour for the tabu search.

    The start node is the first character of the data file at ``path`` (node names
    are therefore assumed to be single characters — confirm against the data
    format).  Returns ``(first_solution, distance_of_first_solution)``.

    Fix: the original had two parameters both named ``a`` (a SyntaxError) and all
    assignment targets clobbered to ``snake_case``; names are restored from the
    reads the body performs (``start_node``, ``visiting``, ``best_node``, ...).
    """
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 acts as "infinity" for the greedy minimum search; the final
        # bogus edge cost is subtracted back out when the tour is closed below.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    # Close the tour: find the edge leading back to the start node from the
    # last real stop and replace the "infinity" placeholder with its cost.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def __UpperCamelCase ( solution , dict_of_neighbours ) ->list:
    """Generate the 2-swap neighbourhood of a tour.

    Every pair of interior nodes is swapped once; each distinct neighbour is a
    list ``[n0, n1, ..., nk, total_distance]`` and the result is sorted by that
    appended total distance (ascending).

    Fixes: the original had two parameters both named ``a`` (a SyntaxError) and
    its sort key was ``lambda a: x[...]`` — the lambda's parameter was renamed
    while the body still read ``x``, raising NameError on the first sort.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Recompute the tour length of the swapped candidate.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def __UpperCamelCase ( first_solution , distance_of_first_solution , dict_of_neighbours , iters , size ):
    """Tabu-search main loop over 2-swap neighbourhoods.

    Runs ``iters`` iterations, keeping a FIFO tabu list of at most ``size``
    exchanged node pairs, and returns ``(best_solution_ever, best_cost)``.

    Fix: the original declared all five parameters as ``a`` (a SyntaxError);
    names are restored from the reads the body performs and from the call site
    in ``main``.  NOTE(review): ``find_neighborhood`` must resolve to the
    neighbourhood generator defined above (which the obfuscation renamed to
    ``__UpperCamelCase``); confirm and restore the intended name.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # Locate the first position where the candidate differs from the
            # current solution — that pair is the attempted exchange.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended tour distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu — try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def __UpperCamelCase ( args=None ):
    """Drive the full tabu search from parsed CLI arguments and print the result.

    Fix: the body reads ``args`` but the obfuscated parameter was named ``a``;
    the parameter is renamed back (callers pass it positionally).
    NOTE(review): ``generate_neighbours``, ``generate_first_solution`` and
    ``tabu_search`` are unresolved here — the functions above were all renamed to
    ``__UpperCamelCase`` by the obfuscation; confirm and restore those names.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    # Fix: the parser was assigned to `_lowercase`, leaving the `parser` name used
    # below undefined; the binding is restored.
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )
    # Pass the arguments to main method
    # NOTE(review): `main` is unresolved — the driver above was renamed to
    # `__UpperCamelCase` by the obfuscation; confirm and restore the intended name.
    main(parser.parse_args())
| 44
| 1
|
'''simple docstring'''
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def __UpperCamelCase ( tok , src_examples , tgt_examples , max_tokens=1024 ):
    """Greedily concatenate consecutive (src, tgt) example pairs so each packed
    pair stays within ``max_tokens`` tokens per side, measured with ``tok``.

    Returns ``(packed_src, packed_tgt)`` lists of the same length.

    Fix: the original declared all four parameters as ``a`` (a SyntaxError) and
    clobbered every local binding; names are restored from the reads the body
    performs (``new_src``, ``cand_src``, ``finished_src``, ...).
    NOTE(review): ``tqdm`` comes from this file's module imports.
    """
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(strang):
        # One tokenizer call per candidate; shape[1] is the sequence length.
        return tok(strang, return_tensors='pt').input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + ' ' + src
        cand_tgt = new_tgt + ' ' + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # cant fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup: flush the partially-filled pair still being accumulated.
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt
def __UpperCamelCase ( tok , data_dir : Path , max_tokens , save_path ):
    """Pack the ``train`` split with the packing helper and copy ``val``/``test``
    splits unchanged into ``save_path``.

    Fix: the original declared all four parameters as ``a`` (a SyntaxError);
    names are restored from the call site in the CLI entry point below.
    NOTE(review): ``pack_examples`` is unresolved — the function defined above
    was renamed to ``__UpperCamelCase`` by the obfuscation; confirm the name.
    """
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"""packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.""")
        Path(save_path / f"""{split}.source""").open('w').write('\n'.join(packed_src))
        Path(save_path / f"""{split}.target""").open('w').write('\n'.join(packed_tgt))
    for split in ["val", "test"]:
        # Evaluation splits are copied verbatim — no packing.
        src_path, tgt_path = data_dir / f"""{split}.source""", data_dir / f"""{split}.target"""
        shutil.copyfile(src_path, save_path / f"""{split}.source""")
        shutil.copyfile(tgt_path, save_path / f"""{split}.target""")
def __UpperCamelCase ( ):
    """CLI entry point: parse arguments, load the tokenizer, pack the data dir.

    Fixes: the obfuscation clobbered the ``parser``/``args``/``tok`` bindings and
    rewrote every ``add_argument`` ``type=`` to the undefined name ``a``; the
    types are restored (str for names/paths, int for the length).
    NOTE(review): ``pack_data_dir`` is unresolved — the function defined above
    was renamed to ``__UpperCamelCase`` by the obfuscation; confirm the name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--tok_name', type=str, help='like facebook/bart-large-cnn,t5-base, etc.')
    parser.add_argument('--max_seq_len', type=int, default=128)
    parser.add_argument('--data_dir', type=str)
    parser.add_argument('--save_path', type=str)
    args = parser.parse_args()
    tok = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tok, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
    # NOTE(review): `packer_cli` is unresolved — the entry point above was renamed
    # to `__UpperCamelCase` by the obfuscation; confirm and restore the intended name.
    packer_cli()
| 44
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
def __UpperCamelCase ( voltage : float , current : float , power : float ) ->tuple:
    """Apply P = V * I: given exactly two of (voltage, current, power) — the
    missing one passed as 0 — return a ``result`` namedtuple naming and valuing
    the computed quantity.

    Raises:
        ValueError: if not exactly one argument is 0, or if power is negative.

    Fix: the original declared all three parameters as ``a`` (a SyntaxError) and
    never bound ``result``; names are restored from the reads the body performs.
    """
    result = namedtuple('result', 'name value')
    if (voltage, current, power).count(0) != 1:
        raise ValueError('Only one argument must be 0')
    elif power < 0:
        raise ValueError(
            'Power cannot be negative in any electrical/electronics system')
    elif voltage == 0:
        return result('voltage', power / current)
    elif current == 0:
        return result('current', power / voltage)
    elif power == 0:
        # abs() keeps the reported power non-negative even when one factor is negative.
        return result('power', float(round(abs(voltage * current), 2)))
    else:
        raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| 44
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( __a ):
    """Whisper processor pairing a feature extractor (audio) with a tokenizer (text).

    NOTE(review): the base class ``__a`` is unresolved (presumably ProcessorMixin —
    TODO confirm).  Four method parameter lists were corrupted by the obfuscation
    (duplicate ``A__`` names made them SyntaxErrors); they are restored from the
    names the bodies read.  All forwarding methods still share the obfuscated name
    ``UpperCamelCase`` and therefore shadow each other — only the last survives;
    confirm the intended per-method names against the surrounding project.
    """

    # Component class declarations read by the processor framework
    # (restored from the clobbered `_UpperCAmelCase` rebindings).
    feature_extractor_class = 'WhisperFeatureExtractor'
    tokenizer_class = 'WhisperTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # current_processor lets __call__ delegate while inside a target-context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def UpperCamelCase(self, task=None, language=None, no_timestamps=True):
        """Forward decoder-prompt-id construction to the tokenizer."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """Dispatch audio to the feature extractor and/or text to the tokenizer."""
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            # Legacy positional calling convention: first positional arg is audio.
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Attach tokenized text as labels for seq2seq training.
            inputs['labels'] = encodings['input_ids']
            return inputs

    def UpperCamelCase(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def UpperCamelCase(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    def UpperCamelCase(self, text, return_tensors="np"):
        """Forward to the tokenizer's get_prompt_ids."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 44
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    # Extracts a subset of teacher BERT layers into a checkpoint for distillation.
    # NOTE(review): the obfuscation rewrote every assignment target in this script
    # to `_lowercase`, so the names read below (`parser`, `args`, `model`, `prefix`,
    # `state_dict`, `compressed_sd`, `std_idx`) are never bound, and the original
    # left-hand compressed-state-dict keys are lost.  Code kept byte-for-byte;
    # only comments were added.
    _lowercase = argparse.ArgumentParser(
        description=(
            'Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'
            ' Distillation'
        )
    )
    parser.add_argument('--model_type', default='bert', choices=['bert'])
    parser.add_argument('--model_name', default='bert-base-uncased', type=str)
    parser.add_argument('--dump_checkpoint', default='serialization_dir/tf_bert-base-uncased_0247911.pth', type=str)
    parser.add_argument('--vocab_transform', action='store_true')
    _lowercase = parser.parse_args()
    if args.model_type == "bert":
        _lowercase = BertForMaskedLM.from_pretrained(args.model_name)
        _lowercase = 'bert'
    else:
        raise ValueError('args.model_type should be "bert".')
    _lowercase = model.state_dict()
    _lowercase = {}
    # Copy embedding weights for the selected prefix.
    for w in ["word_embeddings", "position_embeddings"]:
        _lowercase = state_dict[f'{prefix}.embeddings.{w}.weight']
    for w in ["weight", "bias"]:
        _lowercase = state_dict[f'{prefix}.embeddings.LayerNorm.{w}']
    _lowercase = 0
    # Select teacher layers 0,2,4,7,9,11 for the 6-layer student.
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}'
            ]
            _lowercase = state_dict[
                f'{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}'
            ]
        std_idx += 1
    # MLM head weights.
    _lowercase = state_dict['cls.predictions.decoder.weight']
    _lowercase = state_dict['cls.predictions.bias']
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            _lowercase = state_dict[f'cls.predictions.transform.dense.{w}']
            _lowercase = state_dict[f'cls.predictions.transform.LayerNorm.{w}']
    print(f'N layers selected for distillation: {std_idx}')
    print(f'Number of params transferred for distillation: {len(compressed_sd.keys())}')
    print(f'Save transferred checkpoint to {args.dump_checkpoint}.')
    torch.save(compressed_sd, args.dump_checkpoint)
| 44
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowercase ( __a ):
    """Closed set of decoding branches for MGP-STR predictions.

    NOTE(review): the base ``__a`` is unresolved (presumably ExplicitEnum — TODO
    confirm).  Fix: all three members were obfuscated to the same name
    ``_UpperCAmelCase`` so only the last assignment survived; member names are
    restored from the ``DecodeType.CHARACTER`` / ``.BPE`` / ``.WORDPIECE`` reads
    elsewhere in this file.
    """

    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'
_lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowercase ( __a ):
    """Processor for MGP-STR: wraps a ViT image processor and three tokenizers
    (character / BPE / wordpiece) and fuses their decoded predictions.

    NOTE(review): the base ``__a`` is unresolved (presumably ProcessorMixin — TODO
    confirm), and ``DecodeType`` below is unresolved because the enum above was
    renamed.  Fixes: several parameter lists were corrupted by the obfuscation
    (duplicate ``A__`` names are SyntaxErrors) and are restored from the names the
    bodies read; the helper methods read internally (``_decode_helper``,
    ``char_decode``, ``bpe_decode``, ``wp_decode``) get those names back so the
    calls resolve instead of all methods shadowing one another.
    """

    # Component declarations read by the processor framework
    # (restored from the clobbered `_UpperCAmelCase` rebindings).
    attributes = ['image_processor', 'char_tokenizer']
    image_processor_class = 'ViTImageProcessor'
    char_tokenizer_class = 'MgpstrTokenizer'

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor')
        # Accept the deprecated kwarg as a fallback for the image processor.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')
        self.char_tokenizer = tokenizer
        # Auxiliary tokenizers used by the BPE and wordpiece decode branches.
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('gpt2')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Dispatch images to the image processor and/or text to the char tokenizer."""
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.')
        if images is not None:
            inputs = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None:
            encodings = self.char_tokenizer(text, return_tensors=return_tensors, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def UpperCamelCase(self, sequences):
        """Decode (char, bpe, wp) logits and keep, per sample, the branch with the
        highest confidence.  Returns a dict of fused strings/scores plus the three
        per-branch decodings."""
        char_preds, bpe_preds, wp_preds = sequences
        batch_size = char_preds.size(0)
        char_strs, char_scores = self._decode_helper(char_preds, 'char')
        bpe_strs, bpe_scores = self._decode_helper(bpe_preds, 'bpe')
        wp_strs, wp_scores = self._decode_helper(wp_preds, 'wp')
        final_strs = []
        final_scores = []
        for i in range(batch_size):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores))
            final_strs.append(strs[max_score_index])
            final_scores.append(scores[max_score_index])
        out = {}
        out['generated_text'] = final_strs
        out['scores'] = final_scores
        out['char_preds'] = char_strs
        out['bpe_preds'] = bpe_strs
        out['wp_preds'] = wp_strs
        return out

    def _decode_helper(self, pred_logits, format):
        """Decode one branch's logits into strings plus cumulative-probability
        confidence scores, truncating each prediction at its EOS marker."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '[s]'
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '#'
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 1_02
            eos_str = '[SEP]'
        else:
            raise ValueError(F"""Format {format} is not supported.""")
        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        # Drop the leading position (GO/CLS slot) before decoding.
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]
        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of per-step max probabilities up to EOS.
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)
        return dec_strs, conf_scores

    def char_decode(self, sequences):
        """Character-branch decode; spaces inserted by the tokenizer are stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        """BPE-branch decode via the GPT-2 tokenizer."""
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        """Wordpiece-branch decode; spaces inserted by the tokenizer are stripped."""
        decode_strs = [seq.replace(' ', '') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 44
| 1
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase ( lines : List[str] ) ->str:
    """Return a sha256 hex digest of the given source lines with ``#`` comments
    and (then-)blank lines stripped — used to fingerprint module sources for caching.

    Fixes: the body read ``lines``/``filtered_lines``/``full_str`` while the
    obfuscation had renamed the parameter to ``a`` and clobbered the locals, and
    the hash relied on ``hashlib.shaaaa`` (digit-masked ``sha256``), which does
    not exist.
    """
    from hashlib import sha256  # local import: the module-level `from hashlib import shaaaa` is broken
    filtered_lines = []
    for line in lines:
        line = re.sub(R'#.*', '', line)  # remove comments
        if line:
            filtered_lines.append(line)
    full_str = '\n'.join(filtered_lines)
    # Make a hash from all this code
    full_bytes = full_str.encode('utf-8')
    return sha256(full_bytes).hexdigest()
# get importable module names and hash for caching
# NOTE(review): `_hash_python_lines` is unresolved — the helper defined above was
# renamed to `__UpperCamelCase` by the obfuscation.  Likewise the dict/set targets
# below were clobbered to `_lowercase`, so the `_EXTENSION_TO_MODULE` and
# `_MODULE_TO_EXTENSIONS` names read later are never bound.  Code kept
# byte-for-byte; only comments were added.
_lowercase = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
# Register media-folder extensions in both lower- and upper-case form.
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_lowercase , _lowercase , _lowercase = False, False, False
@dataclass
class _lowercase :
    """Audio feature: encodes audio samples as ``{"bytes", "path"}`` structs and
    decodes them back to ``{"path", "array", "sampling_rate"}`` dicts.

    NOTE(review): the obfuscation corrupted this class in several independent
    ways, each restored below from the names/values the bodies read:
    - the dataclass field names were clobbered (bodies read ``self.sampling_rate``,
      ``self.mono``, ``self.decode``, ``self.pa_type``) and ``init=__a/repr=__a``
      referenced an undefined name (restored to ``False``);
    - one method had a duplicated ``A__`` parameter (a SyntaxError);
    - the numpy dtypes had digits masked (``np.intaa``/``np.floataa``), restored to
      int16/float32 to match the 16-bit PCM handling (``dtype='h'`` memmap and the
      32767 scaling);
    - method names are restored to those the surrounding code reads (the obfuscated
      ``UpperCamelCase`` copies shadowed each other).
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Audio', init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value) -> dict:
        """Encode a path / raw bytes / {"array", "sampling_rate"} sample into the
        storage struct."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value['array'], value['sampling_rate'], format='wav')
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('pcm'):
                # "PCM" only has raw audio bytes
                if value.get('sampling_rate') is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get('bytes'):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['bytes'], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value['path'], dtype='h', mode='r').astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value['sampling_rate'], format='wav')
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")

    def decode_example(self, value, token_per_repo_id=None) -> dict:
        """Decode a stored struct into {"path", "array", "sampling_rate"},
        optionally resolving Hub auth tokens per repo id."""
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.')
        path, file = (value['path'], BytesIO(value['bytes'])) if value['bytes'] is not None else (value['path'], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('::')[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten to plain binary/string columns (only legal when decode=False)."""
        from .features import Value
        if self.decode:
            raise ValueError('Cannot flatten a decoded Audio feature.')
        return {
            "bytes": Value('binary'),
            "path": Value('string'),
        }

    def cast_storage(self, storage) -> pa.StructArray:
        """Cast string/binary/struct arrow storage into the canonical
        {"bytes", "path"} struct array."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices('array'):
            # Re-encode raw {"array", "sampling_rate"} structs through encode_example.
            # (Upstream writes `Audio()`; `type(self)()` resolves under the obfuscated class name.)
            storage = pa.array([type(self)().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage) -> pa.StructArray:
        """Inline file contents into the struct array, keeping only basenames."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
# Module metadata.  NOTE(review): the obfuscation rewrote the dunder targets
# (__author__, __license__, __version__, __maintainer__, __email__, __status__)
# to `_lowercase`, so each assignment below just rebinds the same throwaway name.
_lowercase = 'Muhammad Umer Farooq'
_lowercase = 'MIT'
_lowercase = '1.0.0'
_lowercase = 'Muhammad Umer Farooq'
_lowercase = 'contact@muhammadumerfarooq.me'
_lowercase = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class _lowercase ( __a ):
def __init__( self , A__ ) -> None:
super().__init__()
snake_case = []
snake_case = domain
def UpperCamelCase ( self , A__ , A__ ) -> None:
# Only parse the 'anchor' tag.
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
snake_case = parse.urljoin(self.domain , A__ )
self.urls.append(A__ )
def __UpperCamelCase ( a : str ) ->str:
    """Return the registrable domain (last two dot-separated labels) of *a*'s host."""
    labels = get_sub_domain_name(a ).split('''.''' )
    return '''.'''.join(labels[-2:] )
def __UpperCamelCase ( a : str ) ->str:
    """Return the network location (``host[:port]``) portion of the URL *a*."""
    parsed = parse.urlparse(a )
    return parsed.netloc
def __UpperCamelCase ( a : str = "https://github.com" ) ->list[str]:
    """Crawl *a*, follow its anchor links one level deep, and collect e-mail
    addresses found on the linked pages.

    NOTE(review): this body reads `get_domain_name`, `Parser`, `parser`, `r`,
    `emails`, `domain` and `valid_emails`, none of which are bound under those
    names here — every assignment was collapsed to ``snake_case = ...`` by a
    mechanical rewrite. Restore the original bindings before use; also note it
    performs live HTTP requests.
    """
    snake_case = get_domain_name(a )
    # Initialize the parser
    snake_case = Parser(a )
    try:
        # Open URL
        snake_case = requests.get(a )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        snake_case = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                snake_case = requests.get(a )
                # Get the valid email.
                snake_case = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(a )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(a )
if __name__ == "__main__":
    # Demo entry point: crawl GitHub and print harvested addresses.
    # NOTE(review): `emails_from_url` and `emails` are not defined under these
    # names in this file after the mechanical rename.
    _lowercase = emails_from_url('https://github.com')
    print(f'{len(emails)} emails found:')
    print('\n'.join(sorted(emails)))
| 44
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class _lowercase :
@staticmethod
def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
pass
def __UpperCamelCase ( a: "Image" ) ->str:
    """Return the hex MD5 digest of *a*'s raw pixel bytes (used to fingerprint
    pipeline output images in the slow tests).

    Fixes: ``hashlib.mda`` is not a real attribute (``md5`` was mangled), the
    digest object was bound to a throwaway name while undefined ``m``/``image``
    were read, and the ``Image`` annotation is stringified because PIL may be
    unavailable (its import is guarded above).
    """
    m = hashlib.md5(a.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    """Pipeline tests for depth estimation (`DepthEstimationPipeline`).

    NOTE(review): a mechanical rewrite damaged this class — two method
    signatures repeat the parameter name ``A__`` (a SyntaxError), and results
    are bound to the single throwaway name ``snake_case`` while later lines
    read the originally-named locals (`depth_estimator`, `dataset`, `outputs`).
    """
    # Architectures covered by this pipeline test.
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        # NOTE(review): duplicate parameter names — SyntaxError as written.
        snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
        import datasets
        snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        snake_case = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , A__ , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass
    @slow
    @require_torch
    def UpperCamelCase ( self ) -> Dict:
        # Slow integration test against the real Intel/dpt-large checkpoint.
        snake_case = '''Intel/dpt-large'''
        snake_case = pipeline('''depth-estimation''' , model=A__ )
        snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        snake_case = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )
    @require_torch
    def UpperCamelCase ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 44
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import scaffolding for the Conditional DETR model package.
# NOTE(review): a mechanical rename collapsed every module-level binding to
# `_lowercase`. The `_import_structure` dict referenced by _LazyModule at the
# bottom is never defined under that name, and the later list assignments
# overwrite one another instead of extending the import structure — restore
# the original names before this module can import.
_lowercase = {
    'configuration_conditional_detr': [
        'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ConditionalDetrConfig',
        'ConditionalDetrOnnxConfig',
    ]
}
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = ['ConditionalDetrFeatureExtractor']
    _lowercase = ['ConditionalDetrImageProcessor']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowercase = [
        'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConditionalDetrForObjectDetection',
        'ConditionalDetrForSegmentation',
        'ConditionalDetrModel',
        'ConditionalDetrPreTrainedModel',
    ]
# Under static type checking the real symbols are imported eagerly.
if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )
    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys
    # At runtime the module is replaced by a lazy proxy.
    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __UpperCamelCase ( a ):
    """Delete fairseq bookkeeping entries from state dict *a* in place.

    Missing keys are ignored. Fixes the mechanically-broken original, which
    bound the key list to a throwaway name, then looped over an undefined
    ``ignore_keys`` and popped from an undefined ``state_dict``; the bogus
    ``Optional[int]``/``Dict`` annotations (typing is not imported here) were
    dropped as well.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        # pop with default so absent keys don't raise
        a.pop(k , None )
def __UpperCamelCase ( a ):
    """Rename fairseq-style keys in state dict *a* in place:
    ``transformer_layers`` -> ``layers`` and ``subsample`` -> ``conv``.

    Fixes the mangled original, which popped entries from the undefined name
    ``s_dict`` and discarded the renamed values into a throwaway local; the
    renamed-key targets are restored per the upstream Speech2Text converter.
    """
    keys = list(a.keys() )
    for key in keys:
        if "transformer_layers" in key:
            a[key.replace('''transformer_layers''' , '''layers''' )] = a.pop(key )
        elif "subsample" in key:
            a[key.replace('''subsample''' , '''conv''' )] = a.pop(key )
def __UpperCamelCase ( a ):
    """Build a bias-free ``nn.Linear`` LM head that shares the embedding
    weights of *a* (an ``nn.Embedding``).

    Fixes the mangled original: the shape unpack was discarded and
    ``nn.Linear(a, a, bias=a)`` was called with the embedding module itself
    as every argument.
    """
    vocab_size , emb_size = a.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weight tensor with the linear head.
    lin_layer.weight.data = a.weight.data
    return lin_layer
def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple:
    """Convert a fairseq Speech2Text checkpoint into a Transformers
    ``SpeechaTextForConditionalGeneration`` save directory.

    NOTE(review): the signature repeats the parameter name ``a`` — a
    SyntaxError in Python — and nearly every local (`mam_aaa`, `state_dict`,
    `args`, `missing`, `model`, `tie_embeds`, `lm_head_weights`) is read
    under its original name while only ever assigned to ``snake_case``.
    Restore the original bindings before running.
    """
    snake_case = torch.load(a , map_location='''cpu''' )
    snake_case = mam_aaa['''args''']
    snake_case = mam_aaa['''model''']
    snake_case = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(a )
    rename_keys(a )
    snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    snake_case = args.share_decoder_input_output_embed
    snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )]
    snake_case = SpeechaTextConfig(
        vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , )
    snake_case = SpeechaTextForConditionalGeneration(a )
    snake_case , snake_case = model.model.load_state_dict(a , strict=a )
    # Only positional-embedding weights may legitimately be missing.
    if len(a ) > 0 and not set(a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        snake_case = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case = lm_head_weights
    model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point for the checkpoint conversion.
    # NOTE(review): `convert_fairseq_sat_checkpoint_to_tfms` is not defined
    # under that name in this file after the mechanical rename.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
from math import sqrt
def __UpperCamelCase ( a : int ) ->int:
    """Return the sum of the proper divisors of *a* (divisors excluding *a*).

    Iterates divisor pairs up to sqrt(a); a perfect-square root is counted
    once. Fixes the mangled original, which tested the undefined name ``n``
    and accumulated into ``total`` while assigning to a throwaway local.
    """
    total = 0
    for i in range(1 , int(sqrt(a ) + 1 ) ):
        if a % i == 0 and i != sqrt(a ):
            # count both members of the divisor pair (i, a // i)
            total += i + a // i
        elif i == sqrt(a ):
            # perfect square: count the root only once
            total += i
    return total - a
def __UpperCamelCase ( a : int = 1_0000 ) ->int:
    """Return the sum of all amicable numbers below *a* (Project Euler 21).

    A number i is amicable when sum_of_divisors(sum_of_divisors(i)) == i and
    sum_of_divisors(i) != i. Fixes the mangled original, which passed the
    bound ``a`` instead of the loop variable into ``sum_of_divisors`` and
    returned the undefined name ``total``.
    """
    total = sum(
        i
        for i in range(1 , a )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
    # Read a limit from stdin and print the amicable-number sum below it.
    print(solution(int(str(input()).strip())))
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=__a ):
    """Placeholder object that raises a helpful error when the
    ``transformers``/``torch``/``note_seq`` backends are missing.

    Fixes: every method signature used ``*A__, **A__`` — reusing one
    parameter name, which is a SyntaxError in Python.
    """
    # Backends this dummy stands in for.
    _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']

    def __init__( self , *args , **kwargs ) -> "Union[str, Any]":
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ) -> "Optional[Any]":
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )

    @classmethod
    def UpperCamelCase ( cls , *args , **kwargs ) -> "Any":
        # Annotations are stringified: `typing` names are not imported here.
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 44
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( __a , __a , unittest.TestCase ):
    """Fast tests for ``IFInpaintingSuperResolutionPipeline``.

    NOTE(review): the mixin base classes were renamed to the undefined
    ``__a``; ``UpperCamelCase(self, A__, A__=0)`` repeats a parameter name
    (a SyntaxError); and the dummy-input dict reads `image`, `generator`,
    etc. that the collapsed ``snake_case`` bindings no longer provide.
    """
    _UpperCAmelCase = IFInpaintingSuperResolutionPipeline
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
    def UpperCamelCase ( self ) -> int:
        # Pipeline components come from the shared IF test mixin.
        return self._get_superresolution_dummy_components()
    def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]:
        # NOTE(review): duplicate parameter name ``A__`` — SyntaxError as written.
        if str(A__ ).startswith('''mps''' ):
            snake_case = torch.manual_seed(A__ )
        else:
            snake_case = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase ( self ) -> List[Any]:
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_optional_components()
    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def UpperCamelCase ( self ) -> List[str]:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )
    def UpperCamelCase ( self ) -> int:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_local()
    def UpperCamelCase ( self ) -> Dict:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 44
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
    """A binary-tree node holding a value and left/right child links."""

    def __init__( self , A__ ) -> None:
        # Fix: the mangled original bound value/None to a throwaway local,
        # so instance attributes were never set.
        self.value = A__
        self.left = None
        self.right = None
class _lowercase :
    """Depth-first summation over a binary tree of ``.value`` nodes."""

    def __init__( self , A__ ) -> None:
        # Fix: the original discarded the tree into a throwaway local, so
        # `self.tree` (read by __iter__) was never set.
        self.tree = A__

    def UpperCamelCase ( self , A__ ) -> int:
        """Return the sum of values in the subtree rooted at *A__* (0 for None)."""
        if A__ is None:
            return 0
        # Fix: the original recursed via the undefined name
        # `self.depth_first_search`; recurse through this method instead.
        return A__.value + (
            self.UpperCamelCase(A__.left ) + self.UpperCamelCase(A__.right )
        )

    def __iter__( self ) -> Iterator[int]:
        # Yields a single item: the total sum of the stored tree.
        yield self.UpperCamelCase(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 1
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
_lowercase = [
'good first issue',
'feature request',
'wip',
]
def __UpperCamelCase ( ) ->List[Any]:
    """Close or mark stale the inactive open issues of huggingface/accelerate.

    NOTE(review): collapsed bindings — `g`, `repo`, `open_issues`,
    `comments`, `last_comment`, `current_time` and the day counters are read
    under their original names but only ever assigned to ``snake_case``; the
    sort key lambda also reads ``i`` instead of its own argument and
    ``reverse=a`` is undefined. Restore the original names before running.
    Requires a ``GITHUB_TOKEN`` environment variable.
    """
    snake_case = Github(os.environ['''GITHUB_TOKEN'''] )
    snake_case = g.get_repo('''huggingface/accelerate''' )
    snake_case = repo.get_issues(state='''open''' )
    for issue in open_issues:
        snake_case = sorted([comment for comment in issue.get_comments()] , key=lambda a : i.created_at , reverse=a )
        snake_case = comments[0] if len(a ) > 0 else None
        snake_case = dt.utcnow()
        snake_case = (current_time - issue.updated_at).days
        snake_case = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='''closed''' )
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Add stale comment
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
if __name__ == "__main__":
    # NOTE(review): `main` is not defined under that name after the rename.
    main()
| 44
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_lowercase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def __UpperCamelCase ( a ):
    """Load a checkpoint from path *a* onto CPU and return its state dict.

    Fixes the mangled original, which bound the loaded object to a throwaway
    name and returned the undefined ``sd``; the bogus typing annotations
    (typing is not imported in this module) were dropped.
    """
    sd = torch.load(a , map_location='''cpu''' )
    return sd
def __UpperCamelCase ( a , b , c=None ):
    """Rename the keys of VisualBERT state dict *a* into HF naming.

    a: source state dict; b: model config (reads ``max_position_embeddings``);
    c: optional list of ``(old, new)`` prefix pairs — defaults to the
    standard VisualBERT mapping. Detector weights are skipped.

    Fixes the mangled original: all three parameters were named ``a`` (a
    SyntaxError), the default referenced the undefined global
    ``rename_keys_prefix``, and the body read undefined ``d``/``config``/
    ``new_d`` while discarding every intermediate into ``snake_case``.
    """
    if c is None:
        c = [
            ('''bert.bert''', '''visual_bert'''),
            ('''bert.cls''', '''cls'''),
            ('''bert.classifier''', '''cls'''),
            ('''token_type_embeddings_visual''', '''visual_token_type_embeddings'''),
            ('''position_embeddings_visual''', '''visual_position_embeddings'''),
            ('''projection''', '''visual_projection'''),
        ]
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(b.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in a:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in c:
            new_key = new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] = a[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]:
    """Convert an original VisualBERT checkpoint to a Transformers save dir,
    choosing the model class and config from the checkpoint filename.

    NOTE(review): the signature repeats the parameter name ``a`` (a
    SyntaxError), and the locals (`model_type`, `config_params`, `config`,
    `state_dict`, `model`) are read under their original names but only
    assigned to ``snake_case`` — restore the original bindings to run.
    """
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        snake_case = '''pretraining'''
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
            snake_case = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
            snake_case = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            snake_case = '''vqa'''
        elif "nlvr" in checkpoint_path:
            snake_case = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            snake_case = '''nlvr'''
    snake_case = VisualBertConfig(**a )
    # Load State Dict
    snake_case = load_state_dict(a )
    snake_case = get_new_dict(a , a )
    if model_type == "pretraining":
        snake_case = VisualBertForPreTraining(a )
    elif model_type == "vqa":
        snake_case = VisualBertForQuestionAnswering(a )
    elif model_type == "nlvr":
        snake_case = VisualBertForVisualReasoning(a )
    elif model_type == "multichoice":
        snake_case = VisualBertForMultipleChoice(a )
    model.load_state_dict(a )
    # Save Checkpoints
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point for the VisualBERT checkpoint conversion.
    # NOTE(review): `convert_visual_bert_checkpoint`/`args` are not defined
    # under those names after the mechanical rename.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
_lowercase = logging.getLogger(__name__)
def __UpperCamelCase ( ) ->Dict:
    """Build and parse the CLI arguments for the TFRecord-shard preparation
    script (dataset/tokenizer names, shard size, split, output dir).

    NOTE(review): the parser is bound to a throwaway name but the
    ``add_argument`` calls read `parser`, and the final return reads the
    undefined `args` — restore the original bindings before running.
    """
    snake_case = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=a , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=a , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=a , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=a , default=1000 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=a , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=a , type=a , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=a , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=a , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    snake_case = parser.parse_args()
    return args
def __UpperCamelCase ( a ):
    """Return a closure for ``datasets.Dataset.map`` that tokenizes the
    ``text`` column with tokenizer *a*.

    Fixes the mangled original, whose inner function ignored its own
    parameter and called the undefined names ``tokenizer``/``examples``.
    """
    def fn(examples ):
        return a(examples['''text'''] )
    return fn
def __UpperCamelCase ( a ):
    """Serialize tokenized batch *a* (dict with ``input_ids`` and
    ``attention_mask`` lists) into a list of ``tf.train.Example`` byte
    strings, one per sample.

    Fixes the mangled original: the accumulator and intermediates were bound
    to a throwaway name while ``records``/``tokenized_data``/``example`` were
    read undefined, and the feature types were garbled to
    ``intaa_list``/``IntaaList`` (should be ``int64_list``/``Int64List``).
    """
    records = []
    for i in range(len(a['''input_ids'''] ) ):
        feature = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=a['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=a['''attention_mask'''][i] ) ),
        }
        features = tf.train.Features(feature=feature )
        example = tf.train.Example(features=features )
        records.append(example.SerializeToString() )
    return records
def __UpperCamelCase ( a : Tuple ) ->List[str]:
    """Tokenize a dataset split, group it into fixed-length chunks, and write
    the result as TFRecord shards (locally or to a GCS path).

    NOTE(review): most locals (`dataset`, `tokenizer`, `split_dir`,
    `tokenize_fn`, `dataset_tokenized`, `grouped_dataset`, …) are read under
    their original names but only ever assigned to ``snake_case`` — restore
    the original bindings before running.
    """
    snake_case = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        snake_case = min(len(a ) , args.limit )
        snake_case = dataset.select(range(a ) )
        print(f"""Limiting the dataset to {args.limit} entries.""" )
    snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        snake_case = os.path.join(args.output_dir , args.split )
        if not os.path.exists(a ):
            os.makedirs(a )
    else:
        snake_case = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    snake_case = tokenize_function(a )
    snake_case = dataset.map(a , batched=a , num_proc=4 , remove_columns=['''text'''] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(a : List[str] ):
        # Concatenate all texts.
        snake_case = {k: sum(examples[k] , [] ) for k in examples.keys()}
        snake_case = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        snake_case = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        snake_case = {
            k: [t[i : i + args.max_length] for i in range(0 , a , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    snake_case = dataset_tokenized.map(a , batched=a , batch_size=1000 , num_proc=4 )
    snake_case = 0
    snake_case = 0
    # Write consecutive shard files of `args.shard_size` records each.
    for shard in range(0 , len(a ) , args.shard_size ):
        snake_case = grouped_dataset[shard : shard + args.shard_size]
        snake_case = len(dataset_snapshot['''input_ids'''] )
        snake_case = os.path.join(a , f"""dataset-{shard_count}-{records_containing}.tfrecord""" )
        snake_case = get_serialized_examples(a )
        with tf.io.TFRecordWriter(a ) as out_file:
            for i in range(len(a ) ):
                snake_case = serialized_examples[i]
                out_file.write(a )
            print('''Wrote file {} containing {} records'''.format(a , a ) )
        shard_count += 1
        total_records += records_containing
    # Record the total number of written records for this split.
    with open(f"""split-{args.split}-records-count.txt""" , '''w''' ) as f:
        print(f"""Total {args.split} records: {total_records}""" , file=a )
if __name__ == "__main__":
    # NOTE(review): `parse_args`/`main` are not defined under those names here.
    _lowercase = parse_args()
    main(args)
| 44
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def __UpperCamelCase ( a , b , c , d ):
    """Rewrite a PoolFormer checkpoint key to HF naming.

    a: original key; b: block-number offset to subtract; c: original
    sub-module name (e.g. ``mlp.fc1``); d: its replacement (e.g.
    ``output.conv1``). Returns the rewritten key.

    Fixes the mangled original, whose signature repeated the parameter name
    ``a`` four times (a SyntaxError) and whose body read the undefined
    ``original_name``/``key``/``offset``.
    """
    # Anchor on the first label of the sub-module name (e.g. "mlp").
    to_find = c.split('''.''' )[0]
    key_list = a.split('''.''' )
    # Block and layer numbers immediately precede the anchored label.
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - b
    return a.replace(f"""{orig_block_num}.{layer_num}.{c}""" , f"""block.{new_block_num}.{layer_num}.{d}""" )
def __UpperCamelCase ( a : Tuple ) ->Dict:
    """Map an original PoolFormer state dict's keys into HF naming, tracking
    an offset for the interleaved patch-embedding layers.

    NOTE(review): every per-key rewrite is assigned to the throwaway name
    ``snake_case`` instead of rebinding ``key``, so the chained replacements
    and the final ``new_state_dict[key] = value`` insert are lost /
    undefined — restore the original bindings before running.
    """
    snake_case = OrderedDict()
    snake_case , snake_case = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            snake_case = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            snake_case = key[: key.find('''proj''' )]
            snake_case = key.replace(a , f"""patch_embeddings.{total_embed_found}.""" )
            snake_case = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            snake_case = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            snake_case = replace_key_with_offset(a , a , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            snake_case = replace_key_with_offset(a , a , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            snake_case = replace_key_with_offset(a , a , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            snake_case = replace_key_with_offset(a , a , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            snake_case = replace_key_with_offset(a , a , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            snake_case = replace_key_with_offset(a , a , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            snake_case = key.replace('''head''' , '''classifier''' )
        snake_case = value
    return new_state_dict
def __UpperCamelCase ( ):
    """Download the standard COCO cats test image and return it as a PIL image.

    Fixes the mangled original, which bound both the URL and the opened image
    to throwaway names, returned the undefined ``image``, and passed the
    undefined ``a`` as ``stream``.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    return Image.open(requests.get(url , stream=True ).raw )
@torch.no_grad()
def __UpperCamelCase ( a : Dict , a : Optional[Any] , a : Tuple ) ->List[str]:
    """Convert an original PoolFormer checkpoint to a Transformers save dir,
    verifying the converted logits against hard-coded expectations.

    NOTE(review): the signature repeats the parameter name ``a`` three times
    (a SyntaxError in Python), and the locals (`size`, `repo_id`, `filename`,
    `expected_shape`, `idalabel`, `crop_pct`, `model`, `logits`,
    `expected_slice`, …) are read under their original names but only
    assigned to ``snake_case`` — restore the original bindings to run.
    """
    snake_case = PoolFormerConfig()
    # set attributes based on model_name
    snake_case = '''huggingface/label-files'''
    snake_case = model_name[-3:]
    snake_case = 1000
    snake_case = '''imagenet-1k-id2label.json'''
    snake_case = (1, 1000)
    # set config attributes
    snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
    snake_case = {int(a ): v for k, v in idalabel.items()}
    snake_case = idalabel
    snake_case = {v: k for k, v in idalabel.items()}
    # Per-size depths / hidden sizes / mlp ratio / crop percentage.
    if size == "s12":
        snake_case = [2, 2, 6, 2]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s24":
        snake_case = [4, 4, 12, 4]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 0.9
    elif size == "s36":
        snake_case = [6, 6, 18, 6]
        snake_case = [64, 128, 320, 512]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.9
    elif size == "m36":
        snake_case = [6, 6, 18, 6]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    elif size == "m48":
        snake_case = [8, 8, 24, 8]
        snake_case = [96, 192, 384, 768]
        snake_case = 4.0
        snake_case = 1e-6
        snake_case = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # load image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )
    # Prepare image
    snake_case = prepare_img()
    snake_case = image_processor(images=a , return_tensors='''pt''' ).pixel_values
    logger.info(f"""Converting model {model_name}...""" )
    # load original state dict
    snake_case = torch.load(a , map_location=torch.device('''cpu''' ) )
    # rename keys
    snake_case = rename_keys(a )
    # create HuggingFace model and load state dict
    snake_case = PoolFormerForImageClassification(a )
    model.load_state_dict(a )
    model.eval()
    # Define image processor
    snake_case = PoolFormerImageProcessor(crop_pct=a )
    snake_case = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values
    # forward pass
    snake_case = model(a )
    snake_case = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        snake_case = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        snake_case = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        snake_case = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        snake_case = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        snake_case = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , a , atol=1e-2 )
    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point for the PoolFormer checkpoint conversion.
    # NOTE(review): `parser`/`args`/`convert_poolformer_checkpoint` are not
    # defined under those names after the mechanical rename.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    _lowercase = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class _lowercase ( unittest.TestCase ):
    """Unit tests for the summarization pre-processing helpers
    (`truncate_or_pad`, `process_story`, `build_mask`,
    `compute_token_type_ids`).

    NOTE(review): several methods bind results to the throwaway name
    ``snake_case`` and then read the originally-named locals
    (`expected`, `sequences`, `separator`, …) — those references are
    undefined as written.
    """
    def UpperCamelCase ( self ) -> Tuple:
        # Shared fixture: pad/truncate target length.
        snake_case = 10
    def UpperCamelCase ( self ) -> int:
        # Padding a short sequence up to block_size with zeros.
        snake_case = [1, 2, 3, 4]
        snake_case = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
        self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
    def UpperCamelCase ( self ) -> List[str]:
        # Sequence already at block_size is returned unchanged.
        snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
    def UpperCamelCase ( self ) -> List[Any]:
        # Longer sequence is truncated to block_size.
        snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
        snake_case = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
        self.assertEqual(truncate_or_pad(A__ , self.block_size , 0 ) , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        # A story without an @highlight yields no summary lines.
        snake_case = '''It was the year of Our Lord one thousand seven hundred and
        seventy-five.\n\nSpiritual revelations were conceded to England at that
        favoured period, as at this.'''
        snake_case , snake_case = process_story(A__ )
        self.assertEqual(A__ , [] )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # An empty story yields empty story and summary lists.
        snake_case = ''''''
        snake_case , snake_case = process_story(A__ )
        self.assertEqual(A__ , [] )
        self.assertEqual(A__ , [] )
    def UpperCamelCase ( self ) -> Any:
        # Story text is split from the @highlight summary section.
        snake_case = (
            '''It was the year of Our Lord one thousand seven hundred and '''
            '''seventy-five\n\nSpiritual revelations were conceded to England '''
            '''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
        )
        snake_case , snake_case = process_story(A__ )
        snake_case = [
            '''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
            '''Spiritual revelations were conceded to England at that favoured period, as at this.''',
        ]
        self.assertEqual(A__ , A__ )
        snake_case = ['''It was the best of times.''']
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> str:
        # No padding token present: mask is all ones.
        snake_case = torch.tensor([1, 2, 3, 4] )
        snake_case = torch.tensor([1, 1, 1, 1] )
        np.testing.assert_array_equal(build_mask(A__ , 0 ).numpy() , expected.numpy() )
    def UpperCamelCase ( self ) -> Optional[Any]:
        # Trailing pad tokens (23) are masked out.
        snake_case = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
        snake_case = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(A__ , 23 ).numpy() , expected.numpy() )
    def UpperCamelCase ( self ) -> Optional[int]:
        # Pad token value 1 appearing mid-sequence is still masked.
        snake_case = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
        snake_case = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
        np.testing.assert_array_equal(build_mask(A__ , 1 ).numpy() , expected.numpy() )
    def UpperCamelCase ( self ) -> Tuple:
        # Token type ids alternate at each separator token (101).
        snake_case = 1_01
        snake_case = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
        snake_case = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
        snake_case = compute_token_type_ids(A__ , A__ )
        np.testing.assert_array_equal(A__ , A__ )
| 44
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Directories of the Flax example scripts, resolved relative to this file.
# NOTE(review): the list is bound to `_lowercase`, yet the next lines reference
# `SRC_DIRS` — looks like renaming damage (originally `SRC_DIRS = [...]`);
# as written this raises NameError at import. Confirm the intended name.
_lowercase = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
# Make the example scripts importable as top-level modules.
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax
# Verbose logging so example-script output is visible in test logs.
logging.basicConfig(level=logging.DEBUG)
# NOTE(review): presumably meant `logger = logging.getLogger()` — the class
# below uses `logger`/`stream_handler`, which are never defined. Verify.
_lowercase = logging.getLogger()
def __UpperCamelCase ( ) ->Tuple:
    """Parse ``sys.argv`` for a single ``-f`` option and return its value.

    pytest passes ``-f <file>`` style arguments when invoking these example
    tests; this helper extracts that value. Returns whatever string follows
    ``-f`` (or ``None`` when absent).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    # Fix: the parsed Namespace was bound to a throwaway name while the
    # return statement referenced the undefined `args` — bind it properly.
    args = parser.parse_args()
    return args.f
def __UpperCamelCase ( a : str , split : str = "eval" ) -> Dict:
    """Load ``{split}_results.json`` from directory *a* and return its contents.

    Args:
        a: output directory produced by an example-script run.
        split: results split to read ("eval" by default, e.g. "test").

    Raises:
        ValueError: if the results file does not exist.
    """
    # Fix: both parameters were declared as `a` (duplicate argument names are
    # a SyntaxError) and the body referenced the undefined `path`/`f`; the
    # f-strings below already name `split` and `path`, confirming the intent.
    path = os.path.join(a , f"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path , '''r''' ) as f:
            return json.load(f )
    raise ValueError(f"""can't find {path}""" )
# Echo log records to stdout so pytest captures example-script progress.
# NOTE(review): the handler is bound to `_lowercase` but the next line adds
# the undefined `stream_handler` to the undefined `logger` — renaming damage;
# presumably `stream_handler = logging.StreamHandler(sys.stdout)` plus the
# module logger defined above. Verify against the original file.
_lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase ( __a ):
    """End-to-end smoke tests that run each Flax example script on tiny
    fixtures and check the resulting metrics.

    NOTE(review): heavy renaming damage throughout — locals are rebound to
    ``snake_case``, ``patch.object(A__, 'argv', A__)`` presumably patched
    ``sys.argv`` with the argument list, ``get_results``/``result`` are
    referenced but never bound, the base class ``__a`` is undefined (likely
    ``TestCasePlus``), and all methods share one name so only the last
    survives. Confirm against the original examples test file.
    """
    def UpperCamelCase ( self ) -> List[str]:
        # run_flax_glue on the MRPC fixture; expect eval accuracy >= 0.75.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_glue.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
    @slow
    def UpperCamelCase ( self ) -> List[Any]:
        # run_clm_flax (causal LM) on sample text; expect perplexity < 100.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_clm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 1_00 )
    @slow
    def UpperCamelCase ( self ) -> int:
        # run_summarization_flax on the XSum fixture; expect minimum ROUGE.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_summarization_flax.main()
            snake_case = get_results(A__ , split='''test''' )
            self.assertGreaterEqual(result['''test_rouge1'''] , 10 )
            self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
            self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
            self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
    @slow
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # run_mlm_flax (masked LM) on sample text; expect perplexity < 42.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertLess(result['''eval_perplexity'''] , 42 )
    @slow
    def UpperCamelCase ( self ) -> Dict:
        # run_t5_mlm_flax (span-corruption) on sample text; accuracy >= 0.42.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_ta_mlm_flax.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.4_2 )
    @slow
    def UpperCamelCase ( self ) -> int:
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        snake_case = 7 if get_gpu_count() > 1 else 2
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_flax_ner.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_accuracy'''] , 0.7_5 )
            self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
    @slow
    def UpperCamelCase ( self ) -> Any:
        # run_qa on the SQuAD v2 fixture; expect F1 and exact-match >= 30.
        snake_case = self.get_auto_remove_tmp_dir()
        snake_case = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(A__ , '''argv''' , A__ ):
            run_qa.main()
            snake_case = get_results(A__ )
            self.assertGreaterEqual(result['''eval_f1'''] , 30 )
            self.assertGreaterEqual(result['''eval_exact'''] , 30 )
| 44
| 1
|
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class _lowercase ( unittest.TestCase ):
    """Tests for ``Blip2Processor``: save/load round-trips, tokenizer and
    image-processor delegation, and combined text+image processing.

    NOTE(review): renaming damage — setup binds the temp dir to ``snake_case``
    yet methods read ``self.tmpdirname``; call sites reference the undefined
    ``A__``; intermediate results (``tokenizer_add_kwargs``,
    ``input_feat_extract`` etc.) are referenced but never bound. Confirm the
    intended names against the original processor test file.
    """
    def UpperCamelCase ( self ) -> int:
        # Build a processor from tiny components and save it to a temp dir.
        snake_case = tempfile.mkdtemp()
        snake_case = BlipImageProcessor()
        snake_case = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
        snake_case = BlipaProcessor(A__ , A__ )
        processor.save_pretrained(self.tmpdirname )
    def UpperCamelCase ( self , **A__ ) -> Optional[int]:
        # Reload just the tokenizer from the saved processor directory.
        return AutoProcessor.from_pretrained(self.tmpdirname , **A__ ).tokenizer
    def UpperCamelCase ( self , **A__ ) -> int:
        # Reload just the image processor from the saved processor directory.
        return AutoProcessor.from_pretrained(self.tmpdirname , **A__ ).image_processor
    def UpperCamelCase ( self ) -> Any:
        # Clean up the temp directory created in setup.
        shutil.rmtree(self.tmpdirname )
    def UpperCamelCase ( self ) -> str:
        # One random 3x30x400 uint8 image, converted to PIL (channels-last).
        snake_case = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        snake_case = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def UpperCamelCase ( self ) -> Tuple:
        # Saving then loading with overridden kwargs should apply the overrides
        # to both tokenizer and image processor.
        snake_case = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        snake_case = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
        snake_case = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A__ , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , A__ )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , A__ )
    def UpperCamelCase ( self ) -> List[str]:
        # Image-only path: processor output should match the raw image
        # processor output (element sums compared with small tolerance).
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()
        snake_case = BlipaProcessor(tokenizer=A__ , image_processor=A__ )
        snake_case = self.prepare_image_inputs()
        snake_case = image_processor(A__ , return_tensors='''np''' )
        snake_case = processor(images=A__ , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def UpperCamelCase ( self ) -> Dict:
        # Text-only path: processor output should match the raw tokenizer.
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()
        snake_case = BlipaProcessor(tokenizer=A__ , image_processor=A__ )
        snake_case = '''lower newer'''
        snake_case = processor(text=A__ )
        snake_case = tokenizer(A__ , return_token_type_ids=A__ )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def UpperCamelCase ( self ) -> List[str]:
        # Combined text+image path returns the expected keys; calling with
        # neither text nor images must raise.
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()
        snake_case = BlipaProcessor(tokenizer=A__ , image_processor=A__ )
        snake_case = '''lower newer'''
        snake_case = self.prepare_image_inputs()
        snake_case = processor(text=A__ , images=A__ )
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(A__ ):
            processor()
    def UpperCamelCase ( self ) -> Optional[Any]:
        # batch_decode should delegate straight to the tokenizer.
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()
        snake_case = BlipaProcessor(tokenizer=A__ , image_processor=A__ )
        snake_case = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        snake_case = processor.batch_decode(A__ )
        snake_case = tokenizer.batch_decode(A__ )
        self.assertListEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> Any:
        # Sanity-check the full key set of a combined text+image call.
        snake_case = self.get_image_processor()
        snake_case = self.get_tokenizer()
        snake_case = BlipaProcessor(tokenizer=A__ , image_processor=A__ )
        snake_case = '''lower newer'''
        snake_case = self.prepare_image_inputs()
        snake_case = processor(text=A__ , images=A__ )
        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
| 44
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
# OSS (optimizer state sharding) is only importable when fairscale is present.
if is_fairscale_available():
    from fairscale.optim import OSS
# Module-level logger. NOTE(review): bound to `_lowercase` although the trainer
# below logs via `logger` — looks like renaming damage; verify intended name.
_lowercase = logging.get_logger(__name__)
# Map of --lr_scheduler choices to their schedule factory functions.
# NOTE(review): also bound to `_lowercase`, while `_get_lr_scheduler` reads
# `arg_to_scheduler` — confirm against the original file.
_lowercase = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _lowercase ( __a ):
    """Seq2Seq trainer specialization (base class ``__a`` is presumably
    ``transformers.Trainer``): custom optimizer/scheduler creation, sortish
    sampling, label smoothing, and generation during evaluation.

    NOTE(review): renaming damage throughout — locals are rebound to
    ``snake_case``, many call arguments are the undefined ``A__``, and
    ``self.args.adam_betaa`` appears twice (presumably ``adam_beta1`` /
    ``adam_beta2``). Comments below describe the apparent intent; confirm
    against the original Seq2SeqTrainer before relying on them.
    """
    def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]:
        # Accepts (config, data_args) ahead of the usual Trainer arguments.
        super().__init__(*A__ , **A__ )
        if config is None:
            # Without an explicit config we must be able to read it off the model.
            assert isinstance(self.model , A__ ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F""" {self.model.__class__}"""
            )
            snake_case = self.model.config
        else:
            snake_case = config
        snake_case = data_args
        # FSMT configs expose the target vocab size separately.
        snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            # Both label smoothing and pad-token masking need a pad token id.
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding..''' )
        if self.args.label_smoothing == 0:
            # Plain cross entropy, ignoring pad positions.
            snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            snake_case = label_smoothed_nll_loss
    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Create the optimizer (Adafactor or AdamW, optionally sharded via OSS)
        # and the LR scheduler, unless they were already provided.
        if self.optimizer is None:
            # No weight decay for biases and LayerNorm weights.
            snake_case = ['''bias''', '''LayerNorm.weight''']
            snake_case = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            snake_case = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                snake_case = Adafactor
                snake_case = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                snake_case = AdamW
                # NOTE(review): `adam_betaa` twice — presumably (adam_beta1,
                # adam_beta2); verify.
                snake_case = {
                    '''betas''': (self.args.adam_betaa, self.args.adam_betaa),
                    '''eps''': self.args.adam_epsilon,
                }
            snake_case = self.args.learning_rate
            if self.sharded_ddp:
                # Shard optimizer state across data-parallel workers.
                snake_case = OSS(
                    params=A__ , optim=A__ , **A__ , )
            else:
                snake_case = optimizer_cls(A__ , **A__ )
        if self.lr_scheduler is None:
            snake_case = self._get_lr_scheduler(A__ )
        else: # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Build the LR schedule selected by --lr_scheduler; constant schedules
        # take fewer arguments than warmup+decay ones.
        snake_case = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            snake_case = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            snake_case = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ )
        return scheduler
    def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]:
        # Pick the training sampler: none for iterable datasets, TPU sampler on
        # TPU, sortish (length-grouped) when requested, else random/distributed.
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
        # Compute (loss, logits) either with plain CE (optionally ignoring pad
        # tokens) or with label-smoothed NLL over log-softmaxed logits.
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                snake_case = model(**A__ , use_cache=A__ )[0]
                snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2]
        else:
            # compute label smoothed loss
            snake_case = model(**A__ , use_cache=A__ )[0]
            snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 )
            snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def UpperCamelCase ( self , A__ , A__ ) -> Any:
        # Trainer hook: pop labels from the inputs and return just the loss.
        snake_case = inputs.pop('''labels''' )
        snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        return loss
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        # Evaluation step: optionally generate predictions, pad generated ids
        # and labels to max_length, and compute the loss without gradients.
        snake_case = self._prepare_inputs(A__ )
        snake_case = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            snake_case = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        snake_case = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        snake_case = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        snake_case = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def UpperCamelCase ( self , A__ , A__ ) -> List[str]:
        # If PAD token is not defined at least EOS token has to be defined
        snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                F""" padded to `max_length`={max_length}""" )
        # Right-pad the tensor with pad_token_id up to max_length.
        snake_case = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        snake_case = tensor
        return padded_tensor
| 44
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/config.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/config.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/config.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/config.json',
'bert-base-multilingual-uncased': 'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json',
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/config.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/config.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json'
),
'bert-base-cased-finetuned-mrpc': 'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json',
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json',
'bert-base-german-dbmdz-uncased': 'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json',
'cl-tohoku/bert-base-japanese': 'https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json',
'cl-tohoku/bert-base-japanese-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json'
),
'cl-tohoku/bert-base-japanese-char-whole-word-masking': (
'https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json'
),
'wietsedv/bert-base-dutch-cased': 'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json',
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _lowercase ( __a ):
    """Configuration for a BERT model (mirrors ``transformers.BertConfig``).

    Stores the architecture hyperparameters; extra keyword arguments are
    forwarded to the base config class (presumably ``PretrainedConfig``).
    """

    _UpperCAmelCase = '''bert'''

    def __init__(
        self,
        # Fix: the original declared every parameter as `A__` (duplicate
        # argument names are a SyntaxError) and rebound one local per value;
        # restore the canonical BertConfig parameter names, matching the
        # original defaults in order.
        vocab_size=3_05_22,
        hidden_size=7_68,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=30_72,
        hidden_act='''gelu''',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=5_12,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='''absolute''',
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class _lowercase ( __a ):
    """ONNX export configuration for BERT (base ``__a`` is presumably
    ``OnnxConfig``): declares the dynamic axes of the model inputs.
    """
    @property
    def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """Return axis-name mappings for each ONNX input tensor.

        Multiple-choice tasks carry an extra `choice` dimension between the
        batch and sequence axes.
        """
        # Fix: the axis dict was assigned to a throwaway name while the
        # OrderedDict below referenced the undefined `dynamic_axis`.
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
| 44
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase ( a : List[str] ) ->str:
    """Return a hex digest of the given source lines, ignoring comments.

    Used to fingerprint loader modules for caching: comment-only edits do not
    change the hash.

    Args:
        a: the source code split into lines.
    """
    # Fix: the loop iterated the undefined `lines` and re.sub/append operated
    # on the whole parameter instead of the current line.
    filtered_lines = []
    for line in a:
        line = re.sub(R'''#.*''' , '''''' , line ) # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    # NOTE(review): `shaaaa` comes from the file-level `from hashlib import
    # shaaaa`, which looks like a garbled `sha256` — confirm.
    return shaaaa(full_bytes ).hexdigest()
# get importable module names and hash for caching
# NOTE(review): the two dicts below are bound to `_lowercase`, yet later lines
# reference `_EXTENSION_TO_MODULE`, and the hash helper above is named
# `__UpperCamelCase` rather than the `_hash_python_lines` called here — this
# looks like renaming damage; as written it raises NameError at import.
_lowercase = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
# Register every image/audio file extension (both lower- and upper-case).
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
# Modules whose data files may additionally be shipped as metadata archives.
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# Folder-based loaders also accept zipped archives.
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44
| 1
|
'''simple docstring'''
from typing import Dict, Optional
import numpy as np
import datasets
_lowercase = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_lowercase = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> 
predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
_lowercase = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
# NOTE(review): intersect-and-union for one (prediction, ground-truth) pair of
# segmentation maps — returns per-class histogram areas (intersect, union,
# pred, label). Renaming damage: all parameters are declared as `a` (duplicate
# argument names are a SyntaxError) and every intermediate is rebound to
# `snake_case` while later lines read `label`, `pred_label`, `mask`,
# `num_labels`, `ignore_index` etc. Recover the intended names from the
# original mean-IoU metric implementation before touching the logic.
def __UpperCamelCase ( a : Optional[Any] , a : List[str] , a : Optional[Any] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) ->Optional[Any]:
    # Optionally remap label ids via label_map.
    if label_map is not None:
        for old_id, new_id in label_map.items():
            snake_case = new_id
    # turn into Numpy arrays
    snake_case = np.array(a )
    snake_case = np.array(a )
    # Optionally shift labels down by one, sending background (0) to 255.
    if reduce_labels:
        snake_case = 255
        snake_case = label - 1
        snake_case = 255
    # Drop pixels whose ground-truth equals ignore_index.
    snake_case = label != ignore_index
    snake_case = np.not_equal(a , a )
    snake_case = pred_label[mask]
    snake_case = np.array(a )[mask]
    # Per-class pixel counts: correctly-predicted, predicted, and true areas.
    snake_case = pred_label[pred_label == label]
    snake_case = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
    snake_case = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
    snake_case = np.histogram(a , bins=a , range=(0, num_labels - 1) )[0]
    snake_case = area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
# NOTE(review): accumulates intersect-and-union areas over a whole dataset of
# (prediction, ground-truth) pairs. Same renaming damage as above: duplicate
# `a` parameters (SyntaxError) and locals rebound to `snake_case` while the
# loop reads `a`/`gt_seg_map` and the += lines read `area_*` — confirm the
# intended names against the original mean-IoU implementation.
def __UpperCamelCase ( a : List[str] , a : Optional[Any] , a : List[Any] , a : bool , a : Optional[Dict[int, int]] = None , a : bool = False , ) ->Any:
    # Per-class running totals, one slot per label.
    snake_case = np.zeros((num_labels,) , dtype=np.floataa )
    snake_case = np.zeros((num_labels,) , dtype=np.floataa )
    snake_case = np.zeros((num_labels,) , dtype=np.floataa )
    snake_case = np.zeros((num_labels,) , dtype=np.floataa )
    for result, gt_seg_map in zip(a , a ):
        snake_case , snake_case , snake_case , snake_case = intersect_and_union(
            a , a , a , a , a , a )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
    return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
# NOTE(review): computes the final mean-IoU metrics dict (mean_iou,
# mean_accuracy, overall_accuracy, per-category arrays) from the accumulated
# areas. Same renaming damage as the helpers above (duplicate `a` parameters,
# `snake_case` rebinding while later lines read `metrics`, `all_acc`, `iou`,
# `acc`, `nan_to_num`) — recover names from the original implementation.
def __UpperCamelCase ( a : int , a : List[str] , a : Dict , a : bool , a : Optional[int] = None , a : Optional[Dict[int, int]] = None , a : bool = False , ) ->List[str]:
    snake_case , snake_case , snake_case , snake_case = total_intersect_and_union(
        a , a , a , a , a , a )
    # compute metrics
    snake_case = {}
    # Overall pixel accuracy, per-class IoU and per-class accuracy.
    snake_case = total_area_intersect.sum() / total_area_label.sum()
    snake_case = total_area_intersect / total_area_union
    snake_case = total_area_intersect / total_area_label
    # Class averages ignore NaN entries (classes absent from the data).
    snake_case = np.nanmean(a )
    snake_case = np.nanmean(a )
    snake_case = all_acc
    snake_case = iou
    snake_case = acc
    # Optionally replace NaNs with a user-provided number.
    if nan_to_num is not None:
        snake_case = {metric: np.nan_to_num(a , nan=a ) for metric, metric_value in metrics.items()}
    return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """``datasets.Metric`` wrapper around the mean-IoU computation above.

    NOTE(review): the decorator and ``_info`` reference ``_DESCRIPTION`` /
    ``_CITATION`` / ``_KWARGS_DESCRIPTION``, but the module strings above were
    all bound to ``_lowercase`` — renaming damage; and the duplicate ``A__``
    parameters in ``_compute`` below are a SyntaxError. Confirm against the
    original metric module.
    """
    def UpperCamelCase ( self ) -> Any:
        # Metric metadata: features are nested uint16 sequences (H x W maps).
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                # 1st Seq - height dim, 2nd - width dim
                {
                    '''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                    '''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
                } ) , reference_urls=[
                '''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
            ] , )
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ = None , A__ = None , A__ = False , ) -> Optional[Any]:
        # Delegate to the module-level mean_iou helper and return its dict.
        snake_case = mean_iou(
            results=A__ , gt_seg_maps=A__ , num_labels=A__ , ignore_index=A__ , nan_to_num=A__ , label_map=A__ , reduce_labels=A__ , )
        return iou_result
| 44
|
'''simple docstring'''
# Maps a bare package name to the full pip requirement specifier pinned for it.
# NOTE(review): presumably auto-generated from the project's setup.py dependency
# list — confirm before editing any pin by hand.
_lowercase = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
| 44
| 1
|
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
# Conversion script: renames old UNet config keys / state-dict prefixes to the
# current diffusers names and re-saves the model.
# NOTE(review): an automated mangling pass collapsed every assignment target to
# `_lowercase`, so the names read later (`args`, `parser`, `config`, `text`,
# `subfolder`, `state_dict`, `new_state_dict`, `has_changed`, and the three
# do_only_* flags) are never bound.  Restore the original variable names before
# running this script; the comments below record the apparent intent only.
_lowercase = False
_lowercase = True
_lowercase = False
if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--repo_path',
        default=None,
        type=str,
        required=True,
        help='The config json file corresponding to the architecture.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    _lowercase = parser.parse_args()
    # Old config key -> new UNet config key.
    _lowercase = {
        'image_size': 'sample_size',
        'num_res_blocks': 'layers_per_block',
        'block_channels': 'block_out_channels',
        'down_blocks': 'down_block_types',
        'up_blocks': 'up_block_types',
        'downscale_freq_shift': 'freq_shift',
        'resnet_num_groups': 'norm_num_groups',
        'resnet_act_fn': 'act_fn',
        'resnet_eps': 'norm_eps',
        'num_head_channels': 'attention_head_dim',
    }
    # Old state-dict prefix -> new submodule name.
    _lowercase = {
        'time_steps': 'time_proj',
        'mid': 'mid_block',
        'downsample_blocks': 'down_blocks',
        'upsample_blocks': 'up_blocks',
    }
    # The config may live at the repo root or inside an `unet/` subfolder.
    _lowercase = '' if has_file(args.repo_path, 'config.json') else 'unet'
    with open(os.path.join(args.repo_path, subfolder, 'config.json'), 'r', encoding='utf-8') as reader:
        _lowercase = reader.read()
    _lowercase = json.loads(text)
    if do_only_config:
        # Drop the legacy keys so the new model classes accept the config.
        for key in config_parameters_to_change.keys():
            config.pop(key, None)
    if has_file(args.repo_path, 'config.json'):
        _lowercase = UNetaDModel(**config)
    else:
        # ldm-text2im checkpoints use the conditional UNet variant.
        _lowercase = UNetaDConditionModel if 'ldm-text2im-large-256' in args.repo_path else UNetaDModel
        _lowercase = class_name(**config)
    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))
    _lowercase = dict(model.config)
    if do_only_renaming:
        # Move each legacy key's value over to its new name.
        for key, value in config_parameters_to_change.items():
            if key in config:
                _lowercase = config[key]
                del config[key]
    _lowercase = [k.replace('UNetRes', '') for k in config['down_block_types']]
    _lowercase = [k.replace('UNetRes', '') for k in config['up_block_types']]
    if do_only_weights:
        _lowercase = torch.load(os.path.join(args.repo_path, subfolder, 'diffusion_pytorch_model.bin'))
        _lowercase = {}
        for param_key, param_value in state_dict.items():
            # `.op.*` tensors are dropped entirely.
            if param_key.endswith('.op.bias') or param_key.endswith('.op.weight'):
                continue
            _lowercase = False
            # Rewrite the leading module prefix if it matches a legacy name.
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split('.')[0] == key:
                    _lowercase = param_value
                    _lowercase = True
            if not has_changed:
                _lowercase = param_value
        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 44
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( __a , __a , unittest.TestCase ):
    """Fast pipeline tests for ``IFInpaintingSuperResolutionPipeline``.

    NOTE(review): the mangled original assigned all four class attributes to
    ``_UpperCAmelCase`` (each shadowing the previous) and named every method
    ``UpperCamelCase`` (only the last survived); names restored to the hooks
    the pipeline tester mixins read, and the duplicate ``A__`` parameters of
    the inputs builder (a SyntaxError) restored to ``device``/``seed``.
    """

    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'original_image'})
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}

    def get_dummy_components(self):
        """Tiny super-resolution components shared with the other IF tests."""
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic dummy call kwargs for the pipeline on `device`."""
        # MPS does not support device-bound generators.
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        # Low-res input image plus full-resolution original image and mask.
        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'image': image,
            'original_image': original_image,
            'mask_image': mask_image,
            'generator': generator,
            'num_inference_steps': 2,
            'output_type': 'numpy',
        }
        return inputs

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA')
    def test_save_load_floataa(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2, )
| 44
| 1
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    """Build the kwargs dict fed to the MaMaaa (M2M100) models in these tests.

    Missing masks are derived from the inputs: attention masks mark the non-pad
    positions, head masks default to all-ones.

    NOTE(review): the mangled original named this ``__UpperCamelCase`` with
    every parameter called ``a`` (a SyntaxError) while the testers below call
    ``prepare_mam_aaa_inputs_dict`` — names restored from the call sites.
    NOTE(review): the returned "decoder_attention_mask" entry reuses the
    *encoder* attention_mask (so the decoder mask computed above is unused),
    exactly as written in the source — confirm that this is intentional.
    """
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        # Place the masks on the same device as the ids (the mangled source
        # passed an unusable `device=a`).
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=input_ids.device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=input_ids.device)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
class _lowercase :
    """Model tester for MaMaaa (M2M100): builds tiny configs/inputs and runs
    the decoder-past and encoder/decoder-standalone checks.

    NOTE(review): an automated mangling pass collapsed every parameter name to
    the duplicate ``A__`` (a SyntaxError in ``__init__``) and every assignment
    target to ``snake_case``; the bodies read the original names (``parent``,
    ``batch_size``, ``input_ids``, ...), so those identifiers must be restored
    before this class can execute.
    """
    def __init__( self , A__ , A__=13 , A__=7 , A__=True , A__=False , A__=99 , A__=16 , A__=2 , A__=4 , A__=4 , A__="relu" , A__=0.1 , A__=0.1 , A__=0.0 , A__=0.0 , A__=20 , A__=2 , A__=1 , A__=0 , ) -> List[str]:
        snake_case = parent
        snake_case = batch_size
        snake_case = seq_length
        snake_case = is_training
        snake_case = use_labels
        snake_case = vocab_size
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = encoder_layerdrop
        snake_case = decoder_layerdrop
        snake_case = max_position_embeddings
        snake_case = eos_token_id
        snake_case = pad_token_id
        snake_case = bos_token_id
    # Builds random (config, inputs_dict) pairs for the tests below.
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        snake_case = self.eos_token_id # Eos Token
        snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        snake_case = input_ids.clamp(self.pad_token_id + 1 )
        snake_case = decoder_input_ids.clamp(self.pad_token_id + 1 )
        snake_case = self.get_config()
        snake_case = prepare_mam_aaa_inputs_dict(A__ , A__ , A__ )
        return config, inputs_dict
    # Tiny MaMaaaConfig mirroring the tester's hyper-parameters.
    def UpperCamelCase ( self ) -> int:
        return MaMaaaConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case , snake_case = self.prepare_config_and_inputs()
        return config, inputs_dict
    # Verifies that decoding with a cached past matches decoding from scratch.
    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        snake_case = MaMaaaModel(config=A__ ).get_decoder().to(A__ ).eval()
        snake_case = inputs_dict['''input_ids''']
        snake_case = inputs_dict['''attention_mask''']
        snake_case = inputs_dict['''head_mask''']
        # first forward pass
        snake_case = model(A__ , attention_mask=A__ , head_mask=A__ , use_cache=A__ )
        snake_case , snake_case = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        snake_case = ids_tensor((self.batch_size, 3) , config.vocab_size )
        snake_case = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        snake_case = torch.cat([input_ids, next_tokens] , dim=-1 )
        snake_case = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        snake_case = model(A__ , attention_mask=A__ )['''last_hidden_state''']
        snake_case = model(A__ , attention_mask=A__ , past_key_values=A__ )[
            '''last_hidden_state'''
        ]
        # select random slice
        snake_case = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        snake_case = output_from_no_past[:, -3:, random_slice_idx].detach()
        snake_case = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A__ , A__ , atol=1e-2 ) )
    # Round-trips encoder and decoder separately through save/from_pretrained
    # and checks their outputs match the full model's.
    def UpperCamelCase ( self , A__ , A__ ) -> str:
        snake_case = MaMaaaModel(config=A__ ).to(A__ ).eval()
        snake_case = model(**A__ )
        snake_case = outputs.encoder_last_hidden_state
        snake_case = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = model.get_encoder()
            encoder.save_pretrained(A__ )
            snake_case = MaMaaaEncoder.from_pretrained(A__ ).to(A__ )
        snake_case = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            snake_case = model.get_decoder()
            decoder.save_pretrained(A__ )
            snake_case = MaMaaaDecoder.from_pretrained(A__ ).to(A__ )
        snake_case = decoder(
            input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=A__ , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class _lowercase ( __a , __a , __a , unittest.TestCase ):
    """Common/generation/pipeline test-suite wiring for the MaMaaa (M2M100) models.

    NOTE(review): an automated mangling pass collapsed every class attribute to
    ``_UpperCAmelCase`` (each assignment shadows the previous), every method
    name to ``UpperCamelCase``, and method parameters to the duplicate name
    ``A__`` (a SyntaxError); restore the original identifiers before running.
    """

    # Presumably all_model_classes in the original — TODO confirm.
    _UpperCAmelCase = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    # Presumably all_generative_model_classes.
    _UpperCAmelCase = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    # Presumably pipeline_model_mapping.
    _UpperCAmelCase = (
        {
            '''conversational''': MaMaaaForConditionalGeneration,
            '''feature-extraction''': MaMaaaModel,
            '''summarization''': MaMaaaForConditionalGeneration,
            '''text2text-generation''': MaMaaaForConditionalGeneration,
            '''translation''': MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    _UpperCAmelCase = True
    _UpperCAmelCase = True
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    # Decides which pipeline test cases to skip for this model family.
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ ) -> Any:
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True
        return False
    # setUp: build the model tester and the config tester.
    def UpperCamelCase ( self ) -> List[Any]:
        snake_case = MaMaaaModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ )
    def UpperCamelCase ( self ) -> Tuple:
        self.config_tester.run_common_tests()
    # Round-trips every model class through save_pretrained/from_pretrained.
    def UpperCamelCase ( self ) -> str:
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(A__ )
                snake_case , snake_case = model_class.from_pretrained(A__ , output_loading_info=A__ )
            self.assertEqual(info['''missing_keys'''] , [] )
    def UpperCamelCase ( self ) -> str:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*A__ )
    # Runs a forward pass from inputs_embeds instead of input_ids.
    def UpperCamelCase ( self ) -> List[Any]:
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            snake_case = model_class(A__ )
            model.to(A__ )
            model.eval()
            snake_case = copy.deepcopy(self._prepare_for_class(A__ , A__ ) )
            if not self.is_encoder_decoder:
                snake_case = inputs['''input_ids''']
                del inputs["input_ids"]
            else:
                snake_case = inputs['''input_ids''']
                snake_case = inputs.get('''decoder_input_ids''' , A__ )
                del inputs["input_ids"]
                inputs.pop('''decoder_input_ids''' , A__ )
            snake_case = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                snake_case = wte(A__ )
            else:
                snake_case = wte(A__ )
                snake_case = wte(A__ )
            with torch.no_grad():
                model(**A__ )[0]
    # Smoke-tests fp16 generation on CUDA.
    def UpperCamelCase ( self ) -> int:
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs()
        snake_case = input_dict['''input_ids''']
        snake_case = input_ids.ne(1 ).to(A__ )
        snake_case = MaMaaaForConditionalGeneration(A__ ).eval().to(A__ )
        if torch_device == "cuda":
            model.half()
        model.generate(A__ , attention_mask=A__ )
        model.generate(num_beams=4 , do_sample=A__ , early_stopping=A__ , num_return_sequences=3 )
def _long_tensor(tok_lst):
    """Wrap a nested list of token ids as a ``torch.long`` tensor on the global test device.

    NOTE(review): the mangled original was named ``__UpperCamelCase`` (the tests
    below call ``_long_tensor``) and passed the token list itself as ``device=``;
    restored to ``torch_device`` imported at the top of this file.
    """
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
# Absolute tolerance for the tensor comparisons in the integration tests below
# (presumably TOLERANCE in the original; the mangled `atol=A__` call sites no
# longer reference it — TODO confirm the intended wiring).
_lowercase = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class _lowercase ( unittest.TestCase ):
    """Slow integration tests against the public facebook/m2m100_418M checkpoint:
    hidden-state/logit golden values and a French->English beam-search translation.

    NOTE(review): the mangling collapsed method names to ``UpperCamelCase`` and
    locals to ``snake_case``/``A__`` here too; the bodies read original names
    (``model``, ``input_ids``, ``output``, ...) that are never bound.
    """
    @cached_property
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Shared tokenizer fixture.
        return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
    # Base-model hidden states vs. golden values.
    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(A__ )
        snake_case = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        snake_case = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        snake_case = prepare_mam_aaa_inputs_dict(model.config , A__ , A__ )
        with torch.no_grad():
            snake_case = model(**A__ )[0]
        snake_case = torch.Size((1, 11, 10_24) )
        self.assertEqual(output.shape , A__ )
        # change to expected output here
        snake_case = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=A__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=A__ ) )
    # Conditional-generation head logits vs. golden values.
    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(A__ )
        # change to intended input
        snake_case = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
        snake_case = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
        snake_case = prepare_mam_aaa_inputs_dict(model.config , A__ , A__ )
        with torch.no_grad():
            snake_case = model(**A__ )[0]
        snake_case = torch.Size((1, 11, model.config.vocab_size) )
        self.assertEqual(output.shape , A__ )
        # change to expected output here
        snake_case = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=A__ )
        self.assertTrue(torch.allclose(output[:, :3, :3] , A__ , atol=A__ ) )
    # End-to-end fr->en beam-search translation against reference strings.
    def UpperCamelCase ( self ) -> Tuple:
        snake_case = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(A__ )
        snake_case = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
        snake_case = [
            '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
            '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
            '''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
            ''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
            ''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        snake_case = tokenizer(A__ , padding=A__ , return_tensors='''pt''' )
        snake_case = model.generate(
            input_ids=dct['''input_ids'''].to(A__ ) , attention_mask=dct['''attention_mask'''].to(A__ ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
        snake_case = [
            '''The NSA case highlights the total absence of intelligence debate''',
            '''I think there are two levels of response from the French government.''',
            '''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
            ''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
            ''' communications in France.''',
        ]
        snake_case = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=A__ , skip_special_tokens=A__ )
        assert generated == expected_en
| 44
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
    """Base class for sequence feature extractors: shared padding/truncation
    machinery over batched 1-D feature sequences (``BatchFeature``).

    NOTE(review): the mangling collapsed constructor/method parameters to the
    duplicate name ``A__`` (a SyntaxError) and assignment targets to
    ``snake_case``; the bodies read original names (``feature_size``,
    ``processed_features``, ``padding``, ...) that are never bound.
    """
    # Expected original parameters: feature_size, sampling_rate, padding_value.
    def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]:
        snake_case = feature_size
        snake_case = sampling_rate
        snake_case = padding_value
        snake_case = kwargs.pop('''padding_side''' , '''right''' )
        snake_case = kwargs.pop('''return_attention_mask''' , A__ )
        super().__init__(**A__ )
    # pad(): normalize inputs to numpy, truncate, then pad each example.
    def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            snake_case = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F""" to this method that includes {self.model_input_names[0]}, but you provided"""
                F""" {list(processed_features.keys() )}""" )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(A__ ) == 0:
            if return_attention_mask:
                snake_case = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        snake_case = required_input[0]
        if isinstance(A__ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            snake_case = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(A__ ):
                snake_case = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(A__ ):
                snake_case = '''tf'''
            elif is_torch_tensor(A__ ):
                snake_case = '''pt'''
            elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ):
                snake_case = '''np'''
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(A__ )}. """
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                snake_case = to_numpy(A__ )
            else:
                snake_case = [to_numpy(A__ ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = len(A__ )
        if not all(len(A__ ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        snake_case = []
        for i in range(A__ ):
            snake_case = {k: v[i] for k, v in processed_features.items()}
            # truncation
            snake_case = self._truncate(
                A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , )
            truncated_inputs.append(A__ )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            snake_case = PaddingStrategy.MAX_LENGTH
        snake_case = {}
        for i in range(A__ ):
            # padding
            snake_case = self._pad(
                truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    snake_case = []
                if value.dtype is np.dtype(np.floataa ):
                    snake_case = value.astype(np.floataa )
                batch_outputs[key].append(A__ )
        return BatchFeature(A__ , tensor_type=A__ )
    # _pad(): pad one example (and its attention mask) to max_length on padding_side.
    def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict:
        snake_case = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            snake_case = len(A__ )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            # Round max_length up to the next multiple of pad_to_multiple_of.
            snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            snake_case = np.ones(len(A__ ) , dtype=np.intaa )
        if needs_to_be_padded:
            snake_case = max_length - len(A__ )
            if self.padding_side == "right":
                if return_attention_mask:
                    snake_case = np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                snake_case = np.pad(
                    A__ , A__ , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    snake_case = np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                snake_case = np.pad(
                    A__ , A__ , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return processed_features
    # _truncate(): clip one example (and its attention mask) down to max_length.
    def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]:
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
        snake_case = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        snake_case = len(A__ ) > max_length
        if needs_to_be_truncated:
            snake_case = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                snake_case = processed_features['''attention_mask'''][:max_length]
        return processed_features
    # _get_padding_strategies(): resolve the user-facing `padding` argument.
    def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]:
        # Get padding strategy
        if padding is not False:
            if padding is True:
                snake_case = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
            elif not isinstance(A__ , A__ ):
                snake_case = PaddingStrategy(A__ )
            elif isinstance(A__ , A__ ):
                snake_case = padding
        else:
            snake_case = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
        return padding_strategy
| 44
| 1
|
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Shannon entropy of each row of logits `x`, computed without an explicit softmax.

    For A = sum_i exp(x_i) and B = sum_i x_i * exp(x_i), the entropy of
    softmax(x) equals log(A) - B / A.  Used by DeeBERT to decide early exits.

    NOTE(review): the mangled original was named ``__UpperCamelCase`` (callers
    use ``entropy``), summed the raw input instead of ``exp_x``, and returned
    ``log(a)`` with unbound ``A``/``B`` — restored to the DeeBERT formula.
    """
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
class _lowercase ( nn.Module ):
    """DeeBERT encoder: a stack of BertLayers where each layer is followed by a
    highway early-exit head; at inference, a confident (low-entropy) highway
    raises HighwayException to stop the forward pass early.

    NOTE(review): the mangling collapsed attribute/local names to ``snake_case``
    and parameters to ``A__``; the bodies read original names (``self.layer``,
    ``self.highway``, ``hidden_states``, ...) that are never bound.
    """
    def __init__( self , A__ ) -> Any:
        super().__init__()
        snake_case = config.output_attentions
        snake_case = config.output_hidden_states
        snake_case = nn.ModuleList([BertLayer(A__ ) for _ in range(config.num_hidden_layers )] )
        snake_case = nn.ModuleList([BertHighway(A__ ) for _ in range(config.num_hidden_layers )] )
        # Per-layer entropy thresholds; -1 disables early exit for that layer.
        snake_case = [-1 for _ in range(config.num_hidden_layers )]
    # Sets the early-exit entropy threshold(s): a scalar applies to every layer.
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        if (type(A__ ) is float) or (type(A__ ) is int):
            for i in range(len(self.early_exit_entropy ) ):
                snake_case = x
        else:
            snake_case = x
    # Copies the main pooler's weights into every highway pooler.
    def UpperCamelCase ( self , A__ ) -> Optional[Any]:
        snake_case = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name] )
    # Forward pass; may raise HighwayException when a highway exit is confident.
    def UpperCamelCase ( self , A__ , A__=None , A__=None , A__=None , A__=None , ) -> int:
        snake_case = ()
        snake_case = ()
        snake_case = ()
        for i, layer_module in enumerate(self.layer ):
            if self.output_hidden_states:
                snake_case = all_hidden_states + (hidden_states,)
            snake_case = layer_module(
                A__ , A__ , head_mask[i] , A__ , A__ )
            snake_case = layer_outputs[0]
            if self.output_attentions:
                snake_case = all_attentions + (layer_outputs[1],)
            snake_case = (hidden_states,)
            if self.output_hidden_states:
                snake_case = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                snake_case = current_outputs + (all_attentions,)
            snake_case = self.highway[i](A__ )
            # logits, pooled_output
            if not self.training:
                snake_case = highway_exit[0]
                snake_case = entropy(A__ )
                snake_case = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
                snake_case = all_highway_exits + (highway_exit,)
                if highway_entropy < self.early_exit_entropy[i]:
                    snake_case = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(A__ , i + 1 )
            else:
                snake_case = all_highway_exits + (highway_exit,)
        # Add last layer
        if self.output_hidden_states:
            snake_case = all_hidden_states + (hidden_states,)
        snake_case = (hidden_states,)
        if self.output_hidden_states:
            snake_case = outputs + (all_hidden_states,)
        if self.output_attentions:
            snake_case = outputs + (all_attentions,)
        snake_case = outputs + (all_highway_exits,)
        return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , __a , )
class _lowercase ( __a ):
    # DeeBERT backbone: standard BERT embeddings + early-exit (highway) encoder + pooler.
    # NOTE(review): the bare `snake_case = ...` statements throughout this class look like
    # corrupted assignments (e.g. `self.embeddings = BertEmbeddings(config)`); several later
    # reads (`self.embeddings`, `self.encoder`, `self.pooler`, `config`, `value`,
    # `heads_to_prune`, `input_ids`, ...) have no matching binding here. Confirm against the
    # upstream DeeBERT implementation before relying on this code.
    def __init__( self , A__ ) -> str:
        # Build sub-modules from the config and initialize weights.
        super().__init__(A__ )
        snake_case = config
        snake_case = BertEmbeddings(A__ )
        snake_case = DeeBertEncoder(A__ )
        snake_case = BertPooler(A__ )
        self.init_weights()
    def UpperCamelCase ( self ) -> Dict:
        # Initialize every highway exit's pooler from the main pooler's weights.
        self.encoder.init_highway_pooler(self.pooler )
    def UpperCamelCase ( self ) -> str:
        # Return the input word-embedding layer.
        return self.embeddings.word_embeddings
    def UpperCamelCase ( self , A__ ) -> Any:
        # Setter counterpart of the getter above; presumably
        # `self.embeddings.word_embeddings = value` — TODO confirm.
        snake_case = value
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        # Prune attention heads; `heads_to_prune` maps layer index -> iterable of head indices.
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(A__ )
    @add_start_docstrings_to_model_forward(A__ )
    def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> str:
        # Forward pass: embeddings -> early-exit encoder -> pooler.
        # Exactly one of input_ids / inputs_embeds must be provided.
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' )
        elif input_ids is not None:
            snake_case = input_ids.size()
        elif inputs_embeds is not None:
            snake_case = inputs_embeds.size()[:-1]
        else:
            raise ValueError('''You have to specify either input_ids or inputs_embeds''' )
        snake_case = input_ids.device if input_ids is not None else inputs_embeds.device
        # Default attention / cross-attention masks and token types when the caller omits them.
        if attention_mask is None:
            snake_case = torch.ones(A__ , device=A__ )
        if encoder_attention_mask is None:
            snake_case = torch.ones(A__ , device=A__ )
        if token_type_ids is None:
            snake_case = torch.zeros(A__ , dtype=torch.long , device=A__ )
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        snake_case = self.get_extended_attention_mask(A__ , A__ , A__ )
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            snake_case = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            snake_case = encoder_attention_mask[:, None, None, :]
        snake_case = encoder_extended_attention_mask.to(
            dtype=next(self.parameters() ).dtype ) # fp16 compatibility
        # Additive mask: large negative value on masked positions.
        snake_case = (1.0 - encoder_extended_attention_mask) * -1_0_0_0_0.0
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        snake_case = self.get_head_mask(A__ , self.config.num_hidden_layers )
        snake_case = self.embeddings(
            input_ids=A__ , position_ids=A__ , token_type_ids=A__ , inputs_embeds=A__ )
        snake_case = self.encoder(
            A__ , attention_mask=A__ , head_mask=A__ , encoder_hidden_states=A__ , encoder_attention_mask=A__ , )
        snake_case = encoder_outputs[0]
        snake_case = self.pooler(A__ )
        snake_case = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ] # add hidden_states and attentions if they are here
        return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class _lowercase ( __a ):
    """Raised inside the DeeBERT encoder to unwind the forward pass when an early
    (highway) exit fires before the final layer.

    Attributes:
        message: the highway outputs captured at the exit point.
        exit_layer: 1-based index of the layer at which the model exited.
    """

    def __init__( self , message , exit_layer ) -> Any:
        super().__init__()
        # Bug fix: the original assigned both arguments to a throwaway local
        # (`snake_case = ...`), so the `e.message` / `e.exit_layer` reads in the
        # `except HighwayException` handlers raised AttributeError. It also used the
        # duplicate parameter name `A__` twice, which is a SyntaxError in Python.
        self.message = message
        self.exit_layer = exit_layer  # start from 1!
class _lowercase ( nn.Module ):
    """A single highway (early-exit) head: BERT pooler + dropout + linear classifier.

    Bug fix: the original assigned every value to a throwaway local (`snake_case = ...`),
    leaving `self.pooler` / `self.dropout` / `self.classifier` and the forward-pass
    locals (`pooler_input`, `pooler_output`, `bmodel_output`, `logits`) undefined.
    The restored names are grounded in the reads already present in this block.
    """

    def __init__( self , config ) -> str:
        super().__init__()
        self.pooler = BertPooler(config )
        self.dropout = nn.Dropout(config.hidden_dropout_prob )
        self.classifier = nn.Linear(config.hidden_size , config.num_labels )

    def UpperCamelCase ( self , encoder_outputs ) -> Optional[Any]:
        """Run the exit head on intermediate encoder outputs.

        Args:
            encoder_outputs: tuple whose first element is the hidden-state tensor.

        Returns:
            (logits, pooled_output) for this exit.
        """
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input )
        # "return" pooler_output
        # BertModel-shaped tuple: (sequence_output, pooled_output, *rest)
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output
        # Dropout and classification
        pooled_output = bmodel_output[1]
        pooled_output = self.dropout(pooled_output )
        logits = self.classifier(pooled_output )
        return logits, pooled_output
@add_start_docstrings(
    '''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , __a , )
class _lowercase ( __a ):
    # DeeBERT sequence classifier: DeeBertModel backbone + dropout + linear head, with
    # per-layer highway losses for multi-layer training.
    # NOTE(review): bare `snake_case = ...` statements appear to be corrupted assignments;
    # later reads (`self.num_layers`, `self.bert`, `self.dropout`, `self.classifier`,
    # `outputs`, `logits`, `loss_fct`, `loss`, `highway_losses`, ...) have no matching
    # binding here. Confirm against the upstream DeeBERT example before relying on this.
    def __init__( self , A__ ) -> Union[str, Any]:
        # Build backbone and classification head from the config.
        super().__init__(A__ )
        snake_case = config.num_labels
        snake_case = config.num_hidden_layers
        snake_case = DeeBertModel(A__ )
        snake_case = nn.Dropout(config.hidden_dropout_prob )
        snake_case = nn.Linear(config.hidden_size , self.config.num_labels )
        self.init_weights()
    @add_start_docstrings_to_model_forward(A__ )
    def UpperCamelCase ( self , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=None , A__=-1 , A__=False , ) -> Tuple:
        # Forward pass. A HighwayException raised inside the encoder signals an early
        # exit; it carries the partial outputs (`e.message`) and the 1-based exit layer.
        snake_case = self.num_layers
        try:
            snake_case = self.bert(
                A__ , attention_mask=A__ , token_type_ids=A__ , position_ids=A__ , head_mask=A__ , inputs_embeds=A__ , )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
            snake_case = outputs[1]
            snake_case = self.dropout(A__ )
            snake_case = self.classifier(A__ )
            snake_case = (logits,) + outputs[2:] # add hidden states and attention if they are here
        except HighwayException as e:
            snake_case = e.message
            snake_case = e.exit_layer
            snake_case = outputs[0]
        if not self.training:
            # At inference time, track prediction entropies for early-exit analysis.
            snake_case = entropy(A__ )
            snake_case = []
            snake_case = []
        if labels is not None:
            # Main-head loss: MSE for regression (single label), cross-entropy otherwise.
            if self.num_labels == 1:
                # We are doing regression
                snake_case = MSELoss()
                snake_case = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
            else:
                snake_case = CrossEntropyLoss()
                snake_case = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            # work with highway exits
            snake_case = []
            for highway_exit in outputs[-1]:
                snake_case = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(A__ )
                    highway_entropy.append(highway_exit[2] )
                if self.num_labels == 1:
                    # We are doing regression
                    snake_case = MSELoss()
                    snake_case = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
                else:
                    snake_case = CrossEntropyLoss()
                    snake_case = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
                highway_losses.append(A__ )
            if train_highway:
                # Train only the highway heads; the final highway duplicates the main head.
                snake_case = (sum(highway_losses[:-1] ),) + outputs
                # exclude the final highway, of course
            else:
                snake_case = (loss,) + outputs
        if not self.training:
            snake_case = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                snake_case = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                ) # use the highway of the last layer
        return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 44
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase ( yaml.SafeLoader ):
    """A ``yaml.SafeLoader`` that rejects YAML mappings containing duplicate keys.

    Bug fix: both methods were renamed to ``UpperCamelCase`` by corruption. The first
    must be ``_check_no_duplicates_on_constructed_node`` (it is called by that name
    below) and the second must be ``construct_mapping`` (it overrides the PyYAML hook
    and calls ``super().construct_mapping``). Locals assigned to the throwaway
    ``snake_case`` are restored from their reads (``keys``, ``counter``, ...).
    """

    def _check_no_duplicates_on_constructed_node( self , node ) -> None:
        """Raise TypeError if the mapping ``node`` contains a duplicate key."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert them to tuples so Counter can count them.
        keys = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        counter = Counter(keys )
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )

    def construct_mapping( self , node , deep=False ) -> dict:
        """Construct the mapping as PyYAML would, then verify it has no duplicate keys."""
        mapping = super().construct_mapping(node , deep=deep )
        self._check_no_duplicates_on_constructed_node(node )
        return mapping
def _split_yaml_from_readme( readme_content : str ) -> Tuple[Optional[str], str]:
    """Split README text into its YAML front-matter block and the remaining body.

    Bug fix: the function was renamed ``__UpperCamelCase`` by corruption while its
    callers use ``_split_yaml_from_readme``; the locals were assigned to a throwaway
    name, leaving ``full_content`` / ``sep_idx`` / ``yamlblock`` undefined.

    Args:
        readme_content: full text of a README.md file.

    Returns:
        ``(yaml_block, body)`` where ``yaml_block`` is the text between the leading
        ``---`` fences (without the fences), or ``(None, full_text)`` when the file
        has no front-matter block.
    """
    full_content = list(readme_content.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing '---' fence (offset by 1: the search starts at line 1).
        sep_idx = full_content[1:].index('''---''' ) + 1
        yamlblock = '''\n'''.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class _lowercase ( __a ):
    """Metadata of a dataset README: parses and serializes its YAML front-matter block.

    Bug fix: every method was renamed ``UpperCamelCase`` by corruption while callers
    (``DatasetMetadata.from_readme`` / ``.to_readme`` at the bottom of this file, and
    the internal calls to ``from_yaml_string`` / ``to_yaml_string``) use the original
    names; the ``_FIELDS_WITH_DASHES`` class attribute read below was likewise renamed.
    Throwaway ``snake_case`` locals are restored from their reads.
    """

    # class attributes
    # YAML keys that use dashes in the metadata block but underscores as Python fields.
    _FIELDS_WITH_DASHES = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme( cls , path ) -> "DatasetMetadata":
        """Load metadata from a README file; empty metadata if no YAML block is present."""
        with open(path , encoding='''utf-8''' ) as readme_file:
            yaml_string , _ = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string )
        else:
            return cls()

    def to_readme( self , path ) -> None:
        """Write this metadata into the README at ``path``, preserving its existing body."""
        if path.exists():
            with open(path , encoding='''utf-8''' ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content )
        with open(path , '''w''' , encoding='''utf-8''' ) as readme_file:
            readme_file.write(full_content )

    def _to_readme( self , readme_content = None ) -> str:
        """Return README text with the YAML block rendered from this metadata."""
        if readme_content is not None:
            _ , content = _split_yaml_from_readme(readme_content )
            full_content = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
        else:
            full_content = '''---\n''' + self.to_yaml_string() + '''---\n'''
        return full_content

    @classmethod
    def from_yaml_string( cls , string ) -> "DatasetMetadata":
        """Parse a YAML metadata string, rejecting duplicate keys and mapping dashed
        YAML keys onto their underscore field names."""
        metadata_dict = yaml.load(string , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict )

    def to_yaml_string( self ) -> str:
        """Serialize to a YAML string, mapping underscore fields back to dashed keys.

        NOTE(review): the corrupted original passed an undefined name for
        ``sort_keys`` / ``allow_unicode``; ``False`` / ``True`` below match the
        upstream ``datasets`` implementation — confirm against it.
        """
        return yaml.safe_dump(
            {
                (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=False , allow_unicode=True , encoding='''utf-8''' , ).decode('''utf-8''' )
# Mapping of every known task category to an (initially empty) list of variants.
# Each category gets its own fresh list so entries can be appended independently.
_lowercase = {
    task_category: []
    for task_category in (
        'image-classification',
        'translation',
        'image-segmentation',
        'fill-mask',
        'automatic-speech-recognition',
        'token-classification',
        'sentence-similarity',
        'audio-classification',
        'question-answering',
        'summarization',
        'zero-shot-classification',
        'table-to-text',
        'feature-extraction',
        'other',
        'multiple-choice',
        'text-classification',
        'text-to-image',
        'text2text-generation',
        'zero-shot-image-classification',
        'tabular-classification',
        'tabular-regression',
        'image-to-image',
        'tabular-to-text',
        'unconditional-image-generation',
        'text-retrieval',
        'text-to-speech',
        'object-detection',
        'audio-to-audio',
        'text-generation',
        'conversational',
        'table-question-answering',
        'visual-question-answering',
        'image-to-text',
        'reinforcement-learning',
        'voice-activity-detection',
        'time-series-forecasting',
        'document-question-answering',
    )
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    # Simple CLI: validate (and round-trip) the YAML metadata block of a README.md.
    # Bug fix: the original assigned the parser, parsed args, path and metadata to a
    # throwaway name (`_lowercase = ...`), so the subsequent reads of `ap`, `args`,
    # `readme_filepath` and `dataset_metadata` raised NameError.
    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 44
| 1
|
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 44
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    # Tokenizer test suite for CodeGen (slow and fast tokenizers).
    # NOTE(review): the five identical `_UpperCAmelCase` class attributes below are
    # corrupted names (only the last binding survives); in the upstream test they are
    # `tokenizer_class`, `rust_tokenizer_class`, `test_rust_tokenizer`,
    # `from_pretrained_kwargs`, `test_seq2seq`. Bare `snake_case = ...` statements are
    # likewise corrupted local assignments — confirm against the upstream test file.
    _UpperCAmelCase = CodeGenTokenizer
    _UpperCAmelCase = CodeGenTokenizerFast
    _UpperCAmelCase = True
    _UpperCAmelCase = {'''add_prefix_space''': True}
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> Tuple:
        # Write a tiny BPE vocab + merges file pair into the test tmpdir.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Build a slow tokenizer from the tmpdir fixture files.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Build a fast (Rust-backed) tokenizer from the tmpdir fixture files.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Provide an (input, expected output) text pair for generic mixin tests.
        snake_case = '''lower newer'''
        snake_case = '''lower newer'''
        return input_text, output_text
    def UpperCamelCase ( self ) -> List[Any]:
        # Full tokenization round-trip against the toy vocab, including the unk token.
        snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case = '''lower newer'''
        snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        self.assertListEqual(A__ , A__ )
        snake_case = tokens + [tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        # Slow and fast tokenizers must agree on tokens and ids.
        if not self.test_rust_tokenizer:
            return
        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = '''lower newer'''
        # Testing tokenization
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids without special tokens
        snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids with special tokens
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing the unknown token
        snake_case = tokens + [rust_tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def UpperCamelCase ( self , A__=15 ) -> Tuple:
        # Padding without a pad token must raise for every encode entry point.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                # Simple input
                snake_case = '''This is a simple input'''
                snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case = ('''This is a simple input''', '''This is a pair''')
                snake_case = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
    def UpperCamelCase ( self ) -> Tuple:
        # max_length padding and automatic longest-in-batch padding with a custom pad token.
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
        snake_case = ('''This is a simple input''', '''This is a pair''')
        snake_case = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        snake_case = tokenizer.pad_token_id
        snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )
    def UpperCamelCase ( self ) -> str:
        # A custom BOS token must be prepended to every encoding and survive decoding.
        snake_case = '''$$$'''
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
        snake_case = tokenizer.bos_token_id
        snake_case = tokenizer(A__ )
        snake_case = tokenizer(A__ )
        self.assertEqual(out_s.input_ids[0] , A__ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        snake_case = tokenizer.decode(out_s.input_ids )
        snake_case = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A__ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
    @slow
    def UpperCamelCase ( self ) -> Any:
        # `truncate_before_pattern` must cut the decoded text at the first matching pattern.
        snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        snake_case = '''\nif len_a > len_b:\n    result = a\nelse:\n    result = b\n\n\n\n#'''
        snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
        snake_case = tokenizer.encode(A__ )
        snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
        self.assertEqual(A__ , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally skipped for CodeGen.
        pass
| 44
| 1
|
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def __UpperCamelCase ( a : Any , a : Any=0.999 , a : Any="cosine" , ) ->List[str]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(a : Union[str, Any] ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(a : Tuple ):
return math.exp(t * -12.0 )
else:
raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
snake_case = []
for i in range(a ):
snake_case = i / num_diffusion_timesteps
snake_case = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(a ) / alpha_bar_fn(a ) , a ) )
return torch.tensor(a , dtype=torch.floataa )
class _lowercase ( __a , __a ):
    # KDPM2-style (2nd-order DPM-Solver) discrete scheduler.
    # NOTE(review): bare `snake_case = ...` statements appear to be corrupted assignments
    # (later reads like `self.betas`, `self.alphas_cumprod`, `self.sigmas`, `sigma_hat`,
    # `derivative` have no matching binding), and `torch.floataa` (presumably
    # `torch.float32`) does not exist in torch. Confirm against the upstream diffusers
    # KDPM2DiscreteScheduler before relying on this code.
    _UpperCAmelCase = [e.name for e in KarrasDiffusionSchedulers]
    _UpperCAmelCase = 2
    @register_to_config
    def __init__( self , A__ = 10_00 , A__ = 0.0_0_0_8_5 , A__ = 0.0_1_2 , A__ = "linear" , A__ = None , A__ = "epsilon" , A__ = "linspace" , A__ = 0 , ) -> List[Any]:
        # Build the beta schedule (trained / linear / scaled_linear / squaredcos_cap_v2),
        # derive cumulative alphas, and compute the sigma/timestep tables.
        if trained_betas is not None:
            snake_case = torch.tensor(A__ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            snake_case = torch.linspace(A__ , A__ , A__ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            snake_case = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , A__ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            snake_case = betas_for_alpha_bar(A__ )
        else:
            raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
        snake_case = 1.0 - self.betas
        snake_case = torch.cumprod(self.alphas , dim=0 )
        # set all values
        self.set_timesteps(A__ , A__ , A__ )
    def UpperCamelCase ( self , A__ , A__=None ) -> List[str]:
        # Map a timestep to its index in the (possibly duplicated) sigma schedule.
        if schedule_timesteps is None:
            snake_case = self.timesteps
        snake_case = (schedule_timesteps == timestep).nonzero()
        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter ) == 0:
            snake_case = 1 if len(A__ ) > 1 else 0
        else:
            snake_case = timestep.cpu().item() if torch.is_tensor(A__ ) else timestep
            snake_case = self._index_counter[timestep_int]
        return indices[pos].item()
    @property
    def UpperCamelCase ( self ) -> List[str]:
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()
        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def UpperCamelCase ( self , A__ , A__ , ) -> torch.FloatTensor:
        # Scale the model input by 1/sqrt(sigma^2 + 1) for the current order's sigma.
        snake_case = self.index_for_timestep(A__ )
        if self.state_in_first_order:
            snake_case = self.sigmas[step_index]
        else:
            snake_case = self.sigmas_interpol[step_index]
        snake_case = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def UpperCamelCase ( self , A__ , A__ = None , A__ = None , ) -> Any:
        # Build the inference timestep/sigma tables (including interpolated midpoints).
        snake_case = num_inference_steps
        snake_case = num_train_timesteps or self.config.num_train_timesteps
        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            snake_case = np.linspace(0 , num_train_timesteps - 1 , A__ , dtype=A__ )[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            snake_case = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            snake_case = (np.arange(0 , A__ ) * step_ratio).round()[::-1].copy().astype(A__ )
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            snake_case = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            snake_case = (np.arange(A__ , 0 , -step_ratio )).round().copy().astype(A__ )
            timesteps -= 1
        else:
            raise ValueError(
                F"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" )
        snake_case = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
        snake_case = torch.from_numpy(np.log(A__ ) ).to(A__ )
        snake_case = np.interp(A__ , np.arange(0 , len(A__ ) ) , A__ )
        snake_case = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
        snake_case = torch.from_numpy(A__ ).to(device=A__ )
        # interpolate sigmas
        snake_case = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
        snake_case = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
        snake_case = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
        if str(A__ ).startswith('''mps''' ):
            # mps does not support float64
            snake_case = torch.from_numpy(A__ ).to(A__ , dtype=torch.floataa )
        else:
            snake_case = torch.from_numpy(A__ ).to(A__ )
        # interpolate timesteps
        snake_case = self.sigma_to_t(A__ ).to(A__ , dtype=timesteps.dtype )
        snake_case = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
        snake_case = torch.cat([timesteps[:1], interleaved_timesteps] )
        snake_case = None
        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        snake_case = defaultdict(A__ )
    def UpperCamelCase ( self , A__ ) -> int:
        # Invert the sigma table: map a sigma value to a (fractional) timestep.
        # get log sigma
        snake_case = sigma.log()
        # get distribution
        snake_case = log_sigma - self.log_sigmas[:, None]
        # get sigmas range
        snake_case = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
        snake_case = low_idx + 1
        snake_case = self.log_sigmas[low_idx]
        snake_case = self.log_sigmas[high_idx]
        # interpolate sigmas
        snake_case = (low - log_sigma) / (low - high)
        snake_case = w.clamp(0 , 1 )
        # transform interpolation to time range
        snake_case = (1 - w) * low_idx + w * high_idx
        snake_case = t.view(sigma.shape )
        return t
    @property
    def UpperCamelCase ( self ) -> List[str]:
        # True while the scheduler is in the first-order half of a KDPM2 step.
        return self.sample is None
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ = True , ) -> Union[SchedulerOutput, Tuple]:
        # One denoising step: first-order Euler half followed by the second-order half.
        snake_case = self.index_for_timestep(A__ )
        # advance index counter by 1
        snake_case = timestep.cpu().item() if torch.is_tensor(A__ ) else timestep
        self._index_counter[timestep_int] += 1
        if self.state_in_first_order:
            snake_case = self.sigmas[step_index]
            snake_case = self.sigmas_interpol[step_index + 1]
            snake_case = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            snake_case = self.sigmas[step_index - 1]
            snake_case = self.sigmas_interpol[step_index]
            snake_case = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        snake_case = 0
        snake_case = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
            snake_case = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            snake_case = sigma_hat if self.state_in_first_order else sigma_interpol
            snake_case = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError('''prediction_type not implemented yet: sample''' )
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" )
        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            snake_case = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            snake_case = sigma_interpol - sigma_hat
            # store for 2nd order step
            snake_case = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            snake_case = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            snake_case = sigma_next - sigma_hat
            snake_case = self.sample
            snake_case = None
        snake_case = sample + derivative * dt
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=A__ )
    def UpperCamelCase ( self , A__ , A__ , A__ , ) -> torch.FloatTensor:
        # Add scheduler noise to clean samples at the given timesteps (for img2img etc.).
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        snake_case = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
        if original_samples.device.type == "mps" and torch.is_floating_point(A__ ):
            # mps does not support float64
            snake_case = self.timesteps.to(original_samples.device , dtype=torch.floataa )
            snake_case = timesteps.to(original_samples.device , dtype=torch.floataa )
        else:
            snake_case = self.timesteps.to(original_samples.device )
            snake_case = timesteps.to(original_samples.device )
        snake_case = [self.index_for_timestep(A__ , A__ ) for t in timesteps]
        snake_case = sigmas[step_indices].flatten()
        while len(sigma.shape ) < len(original_samples.shape ):
            snake_case = sigma.unsqueeze(-1 )
        snake_case = original_samples + noise * sigma
        return noisy_samples
    def __len__( self ) -> Tuple:
        # Number of training timesteps configured for this scheduler.
        return self.config.num_train_timesteps
| 44
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
snake_case = parent
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
def UpperCamelCase ( self ) -> int:
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ) -> int:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
snake_case = TFViTModel(config=A__ )
snake_case = model(A__ , training=A__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
snake_case = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
snake_case = self.type_sequence_label_size
snake_case = TFViTForImageClassification(A__ )
snake_case = model(A__ , labels=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
snake_case = self.image_size // 2
snake_case = pixel_values[:, :, :image_size, :image_size]
snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = TFViTForImageClassification(A__ )
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(A__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def prepare_config_and_inputs_for_common(self):
    """Return (config, inputs_dict) in the shape the common TF model tests expect.

    Fixes the garbled original, whose triple unpack bound every element to the
    same `snake_case` name while later lines read `pixel_values`.
    """
    config_and_inputs = self.prepare_config_and_inputs()
    config, pixel_values, labels = config_and_inputs
    inputs_dict = {"pixel_values": pixel_values}
    return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    """Common-suite test harness for TF ViT models.

    NOTE(review): identifiers are machine-garbled. Every class attribute below
    is bound to the same name `_UpperCAmelCase` (each assignment overwrites the
    previous one) and every method shares the name `UpperCamelCase` — upstream
    these are distinct attributes (all_model_classes, pipeline_model_mapping,
    test flags) and distinct test methods; reconcile before relying on this copy.
    """
    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    def UpperCamelCase ( self ) -> List[Any]:
        """setUp: create the shared model tester and config tester."""
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )
    def UpperCamelCase ( self ) -> int:
        """Run the common config sanity checks."""
        self.config_tester.run_common_tests()
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        pass
    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        pass
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Check input embeddings are a Keras layer and output embeddings are absent or a layer."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )
    def UpperCamelCase ( self ) -> List[Any]:
        """Check that the first argument of `model.call` is `pixel_values`."""
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )
    def UpperCamelCase ( self ) -> Union[str, Any]:
        """Exercise the base-model shape checks via the model tester."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )
    def UpperCamelCase ( self ) -> Optional[Any]:
        """Exercise the classification-head shape checks via the model tester."""
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )
    @slow
    def UpperCamelCase ( self ) -> Any:
        """Smoke-test loading the pretrained google/vit-base-patch16-224 checkpoint."""
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration tests.

    Renamed from the garbled `__UpperCamelCase` so the `prepare_img()` call in
    the integration test below resolves.
    """
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Integration test: run the pretrained TF ViT classifier on a real image.

    NOTE(review): identifiers are machine-garbled — values are bound to
    `snake_case` while later lines read `outputs`; reconcile with the upstream
    test before relying on this copy.
    """
    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        """default_image_processor: the pretrained ViT processor (None without vision deps)."""
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
    @slow
    def UpperCamelCase ( self ) -> Dict:
        """Run inference on the COCO fixture and compare the first logits to known values."""
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 1
|
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_lowercase = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_lowercase = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
_lowercase = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
    """Spearman rank-order correlation metric backed by scipy.stats.spearmanr.

    Fixes the garbled original: both methods shared the name `UpperCamelCase`
    (so the second silently overwrote the first), and `_compute` read an
    undefined `results`. `datasets.Metric` dispatches to `_info`/`_compute`.
    """

    def _info(self):
        """Declare the metric's inputs (two float columns) and reference URL."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        """Return {'spearmanr': rho} and, when requested, the p-value too."""
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        return {"spearmanr": results[0]}
| 44
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Datasets with prepared copies on the HF GCP bucket. In the garbled original
# this list was bound to `_lowercase` while the code below reads
# DATASETS_ON_HF_GCP.
DATASETS_ON_HF_GCP = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
    """Build `parameterized.named_parameters` cases from DATASETS_ON_HF_GCP.

    With config: one case per (dataset, config_name) pair; without: one case per
    distinct dataset. Renamed from the garbled original so the decorator call
    site resolves, and the parameter matches the `with_config=` keyword used there.
    """
    if with_config:
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) )
class _lowercase ( __a ):
    """Parameterized check that each dataset's info file exists on the HF GCP bucket.

    NOTE(review): identifiers are machine-garbled — the two class attributes
    share one name, the method parameters are both `A__` (a SyntaxError), and
    values bound to `snake_case` are later read as `dataset_module`,
    `builder_instance`, etc.; reconcile with the upstream test before use.
    """
    _UpperCAmelCase = None
    _UpperCAmelCase = None
    def UpperCamelCase ( self , A__ , A__ ) -> str:
        """Build the dataset's info URL on GCS and assert it can be cached locally."""
        with TemporaryDirectory() as tmp_dir:
            snake_case = dataset_module_factory(A__ , cache_dir=A__ )
            snake_case = import_main_class(dataset_module.module_path , dataset=A__ )
            snake_case = builder_cls(
                cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , )
            # Remote path of the prepared dataset's DATASET_INFO file.
            snake_case = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            snake_case = cached_path(A__ , cache_dir=A__ )
            self.assertTrue(os.path.exists(A__ ) )
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    """Load wikipedia/20220301.frr from the prepared HF GCS copy (integration).

    Reconstructed from the garbled original: renamed so pytest collects it and
    injects the `tmp_path_factory` fixture the body reads, and intermediate
    results are bound to the names that are actually used.
    """
    tmp_dir = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_dir )
    builder_cls = import_main_class(dataset_module.module_path )
    builder_instance = builder_cls(
        cache_dir=tmp_dir , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    """Stream wikipedia/20220301.frr without a local download and check the
    IterableDatasetDict structure (integration).

    Reconstructed from the garbled original: renamed for pytest collection/fixture
    injection and intermediate results bound to the names the assertions read.
    """
    dataset_module = dataset_module_factory('''wikipedia''' , cache_dir=tmp_path )
    builder_cls = import_main_class(dataset_module.module_path , dataset=True )
    builder_instance = builder_cls(
        cache_dir=tmp_path , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds , IterableDatasetDict )
    assert "train" in ds
    assert isinstance(ds['''train'''] , IterableDataset )
    assert next(iter(ds['''train'''] ) )
| 44
| 1
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
# In the garbled original both assignments were bound to `_lowercase` (the
# second overwriting the first) while the trainer below reads `logger` and
# `arg_to_scheduler`.
logger = logging.get_logger(__name__)

# Maps --lr_scheduler CLI names to the transformers schedule factories.
arg_to_scheduler = {
    'linear': get_linear_schedule_with_warmup,
    'cosine': get_cosine_schedule_with_warmup,
    'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
    'polynomial': get_polynomial_decay_schedule_with_warmup,
    'constant': get_constant_schedule,
    'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _lowercase ( __a ):
    """Seq2seq-specialized Trainer: custom optimizer/scheduler construction,
    label-smoothed loss, sortish sampling, and a generation-aware prediction step.

    NOTE(review): identifiers are machine-garbled — many assignments bind
    `snake_case` while later lines read the intended names (`config`,
    `optimizer_cls`, `loss`, ...), every method shares the name `UpperCamelCase`,
    and several signatures repeat `A__` (a SyntaxError). Reconcile with the
    upstream legacy seq2seq trainer before relying on this copy.
    """
    def __init__( self , A__=None , A__=None , *A__ , **A__ ) -> Union[str, Any]:
        # Garbled signature; upstream is (config=None, data_args=None, *args, **kwargs).
        super().__init__(*A__ , **A__ )
        if config is None:
            assert isinstance(self.model , A__ ), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                F""" {self.model.__class__}"""
            )
            snake_case = self.model.config
        else:
            snake_case = config
        snake_case = data_args
        snake_case = self.config.tgt_vocab_size if isinstance(self.config , A__ ) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
                ''' padding..''' )
        if self.args.label_smoothing == 0:
            snake_case = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss
            snake_case = label_smoothed_nll_loss
    def UpperCamelCase ( self , A__ ) -> Tuple:
        """create_optimizer_and_scheduler: build AdamW/Adafactor (optionally
        sharded with fairscale OSS) with no weight decay on bias/LayerNorm,
        then the LR scheduler if one is not already set."""
        if self.optimizer is None:
            snake_case = ['''bias''', '''LayerNorm.weight''']
            snake_case = [
                {
                    '''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
                    '''weight_decay''': self.args.weight_decay,
                },
                {
                    '''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
                    '''weight_decay''': 0.0,
                },
            ]
            snake_case = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                snake_case = Adafactor
                snake_case = {'''scale_parameter''': False, '''relative_step''': False}
            else:
                snake_case = AdamW
                # NOTE(review): both beta entries read `adam_betaa` — upstream uses
                # adam_beta1/adam_beta2; confirm before use.
                snake_case = {
                    '''betas''': (self.args.adam_betaa, self.args.adam_betaa),
                    '''eps''': self.args.adam_epsilon,
                }
            snake_case = self.args.learning_rate
            if self.sharded_ddp:
                snake_case = OSS(
                    params=A__ , optim=A__ , **A__ , )
            else:
                snake_case = optimizer_cls(A__ , **A__ )
        if self.lr_scheduler is None:
            snake_case = self._get_lr_scheduler(A__ )
        else:  # ignoring --lr_scheduler
            logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
    def UpperCamelCase ( self , A__ ) -> Tuple:
        """_get_lr_scheduler: instantiate the schedule selected by --lr_scheduler;
        constant schedules take fewer arguments than warmup/decay ones."""
        snake_case = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            snake_case = schedule_func(self.optimizer )
        elif self.args.lr_scheduler == "constant_w_warmup":
            snake_case = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
        else:
            snake_case = schedule_func(
                self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=A__ )
        return scheduler
    def UpperCamelCase ( self ) -> Optional[torch.utils.data.Sampler]:
        """_get_train_sampler: None for iterable datasets, TPU sampler on TPU,
        otherwise random or distributed (after an optional sortish-sampler setup)."""
        if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset )
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
            return (
                RandomSampler(self.train_dataset )
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset )
            )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
        """_compute_loss: CE ignoring pad tokens, the model's own loss, or
        label-smoothed NLL, depending on configuration. Returns (loss, logits)."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                snake_case = model(**A__ , use_cache=A__ )[0]
                snake_case = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
            else:
                # compute usual loss via models
                snake_case , snake_case = model(**A__ , labels=A__ , use_cache=A__ )[:2]
        else:
            # compute label smoothed loss
            snake_case = model(**A__ , use_cache=A__ )[0]
            snake_case = torch.nn.functional.log_softmax(A__ , dim=-1 )
            snake_case , snake_case = self.loss_fn(A__ , A__ , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
        return loss, logits
    def UpperCamelCase ( self , A__ , A__ ) -> Any:
        """compute_loss: Trainer hook — pop labels and return the scalar loss."""
        snake_case = inputs.pop('''labels''' )
        snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        return loss
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """prediction_step: optionally generate, pad generated ids/labels up to
        the configured max_length, and compute the evaluation loss."""
        snake_case = self._prepare_inputs(A__ )
        snake_case = {
            '''max_length''': self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            '''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            snake_case = self.model.generate(
                inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **A__ , )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        snake_case = inputs.pop('''labels''' )
        with torch.no_grad():
            # compute loss on predict data
            snake_case , snake_case = self._compute_loss(A__ , A__ , A__ )
        snake_case = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        snake_case = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            snake_case = self._pad_tensors_to_max_len(A__ , gen_kwargs['''max_length'''] )
        return (loss, logits, labels)
    def UpperCamelCase ( self , A__ , A__ ) -> List[str]:
        """_pad_tensors_to_max_len: right-pad with pad (or eos) token id up to max_length."""
        # If PAD token is not defined at least EOS token has to be defined
        snake_case = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                '''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
                F""" padded to `max_length`={max_length}""" )
        snake_case = pad_token_id * torch.ones(
            (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
        snake_case = tensor
        return padded_tensor
| 44
|
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Return gcd(a, b) via the iterative Euclidean algorithm.

    Fixes the garbled original: both parameters were named `a` (a SyntaxError),
    the loop read an undefined `b`, and the definition name did not match the
    `euclidean_gcd` calls in `main` below.
    """
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm.

    Fixes the garbled original: duplicate `a` parameters (a SyntaxError), a
    recursive call that passed `(a, a % b)` instead of `(b, a % b)`, and a
    definition name that did not match the call sites in `main`.
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Demonstrate both gcd implementations on a few sample pairs.

    Renamed from the garbled `__UpperCamelCase` so the `main()` call in the
    __main__ guard below resolves.
    """
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )


if __name__ == "__main__":
    main()
| 44
| 1
|
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class _lowercase :
    """Immutable Flax scheduler state container.

    NOTE(review): field names are machine-garbled — every field is bound to
    `_UpperCAmelCase`, so only the last assignment survives. Upstream the
    fields are: common, init_noise_sigma, timesteps, num_inference_steps
    (default None); confirm against the upstream DDPMSchedulerState.
    """
    _UpperCAmelCase = 42
    # setable values
    _UpperCAmelCase = 42
    _UpperCAmelCase = 42
    _UpperCAmelCase = None
    @classmethod
    def UpperCamelCase ( cls , A__ , A__ , A__ ) -> Optional[Any]:
        """Factory: build the state from (common, init_noise_sigma, timesteps)."""
        return cls(common=A__ , init_noise_sigma=A__ , timesteps=A__ )
@dataclass
class _lowercase ( __a ):
    # Output wrapper for the scheduler's `step`; NOTE(review): the field name is
    # garbled — upstream this adds a `state` field to FlaxSchedulerOutput.
    _UpperCAmelCase = 42
class _lowercase ( __a , __a ):
    """Flax DDPM scheduler: ancestral sampling per Ho et al. 2020 (DDPM).

    NOTE(review): identifiers are machine-garbled — assignments bind `snake_case`
    while later lines read the intended names (`alpha_prod_t`, `variance`,
    `pred_prev_sample`, ...), all methods share the name `UpperCamelCase`, and
    several signatures repeat `A__` (a SyntaxError). Reconcile with the upstream
    FlaxDDPMScheduler before relying on this copy.
    """
    _UpperCAmelCase = [e.name for e in FlaxKarrasDiffusionSchedulers]
    _UpperCAmelCase = 42
    @property
    def UpperCamelCase ( self ) -> List[Any]:
        """has_state: this scheduler keeps its mutable data in DDPMSchedulerState."""
        return True
    @register_to_config
    def __init__( self , A__ = 10_00 , A__ = 0.0_0_0_1 , A__ = 0.0_2 , A__ = "linear" , A__ = None , A__ = "fixed_small" , A__ = True , A__ = "epsilon" , A__ = jnp.floataa , ) -> str:
        # All constructor args are captured by @register_to_config; only the
        # dtype is stored directly on the instance.
        snake_case = dtype
    def UpperCamelCase ( self , A__ = None ) -> DDPMSchedulerState:
        """create_state: build the initial state (common params, unit sigma,
        descending full range of training timesteps)."""
        if common is None:
            snake_case = CommonSchedulerState.create(self )
        # standard deviation of the initial noise distribution
        snake_case = jnp.array(1.0 , dtype=self.dtype )
        snake_case = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]
        return DDPMSchedulerState.create(
            common=A__ , init_noise_sigma=A__ , timesteps=A__ , )
    def UpperCamelCase ( self , A__ , A__ , A__ = None ) -> jnp.ndarray:
        """scale_model_input: identity for DDPM (kept for scheduler API parity)."""
        return sample
    def UpperCamelCase ( self , A__ , A__ , A__ = () ) -> DDPMSchedulerState:
        """set_timesteps: choose `num_inference_steps` evenly spaced timesteps,
        stored in descending order."""
        snake_case = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        snake_case = (jnp.arange(0 , A__ ) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=A__ , timesteps=A__ , )
    def UpperCamelCase ( self , A__ , A__ , A__=None , A__=None ) -> List[str]:
        """_get_variance: posterior variance at timestep t for the configured
        (or explicitly passed) variance_type."""
        snake_case = state.common.alphas_cumprod[t]
        snake_case = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        snake_case = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]
        if variance_type is None:
            snake_case = self.config.variance_type
        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            snake_case = jnp.clip(A__ , a_min=1e-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            snake_case = jnp.log(jnp.clip(A__ , a_min=1e-20 ) )
        elif variance_type == "fixed_large":
            snake_case = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            snake_case = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min (posterior) and max (beta) log-variance.
            snake_case = variance
            snake_case = state.common.betas[t]
            snake_case = (predicted_variance + 1) / 2
            snake_case = frac * max_log + (1 - frac) * min_log
        return variance
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , A__ = None , A__ = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        """step: one reverse-diffusion step — predict x0, form the posterior
        mean, and add variance noise (except at t == 0)."""
        snake_case = timestep
        if key is None:
            snake_case = jax.random.PRNGKey(0 )
        # Learned-variance models emit [noise, variance] stacked on the channel dim.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            snake_case , snake_case = jnp.split(A__ , sample.shape[1] , axis=1 )
        else:
            snake_case = None
        # 1. compute alphas, betas
        snake_case = state.common.alphas_cumprod[t]
        snake_case = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        snake_case = 1 - alpha_prod_t
        snake_case = 1 - alpha_prod_t_prev
        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            snake_case = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            snake_case = model_output
        elif self.config.prediction_type == "v_prediction":
            snake_case = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                ''' for the FlaxDDPMScheduler.''' )
        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            snake_case = jnp.clip(A__ , -1 , 1 )
        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        snake_case = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        snake_case = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        snake_case = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
        # 6. Add noise
        def random_variance():
            # Draw the stochastic part of the ancestral step from `key`.
            snake_case = jax.random.split(A__ , num=1 )
            snake_case = jax.random.normal(A__ , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(A__ , A__ , predicted_variance=A__ ) ** 0.5) * noise
        # No noise is added at the final (t == 0) step.
        snake_case = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )
        snake_case = pred_prev_sample + variance
        if not return_dict:
            return (pred_prev_sample, state)
        return FlaxDDPMSchedulerOutput(prev_sample=A__ , state=A__ )
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , ) -> jnp.ndarray:
        """add_noise: forward-diffuse clean samples to the given timesteps."""
        return add_noise_common(state.common , A__ , A__ , A__ )
    def UpperCamelCase ( self , A__ , A__ , A__ , A__ , ) -> jnp.ndarray:
        """get_velocity: compute v-prediction training targets."""
        return get_velocity_common(state.common , A__ , A__ , A__ )
    def __len__( self ) -> Tuple:
        """Number of training timesteps."""
        return self.config.num_train_timesteps
| 44
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Parse an edge-list file (``node1 node2 distance`` per line) into an
    adjacency dict ``{node: [[neighbour, distance_str], ...]}`` with both
    directions recorded.

    Fixes the garbled original (the freshly created list was bound to the wrong
    name before use) and renames the definition to match the `generate_neighbours`
    call sites; the file handle is now closed via a context manager.
    """
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            node_a, node_b, distance = line.split()[:3]
            # Record the edge in both directions; distances stay strings, as the
            # downstream tabu-search helpers expect.
            dict_of_neighbours.setdefault(node_a, []).append([node_b, distance])
            dict_of_neighbours.setdefault(node_b, []).append([node_a, distance])
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour tour starting from the first node in the
    input file. Returns (tour, total_distance); the tour ends back at the start.

    Fixes the garbled original (duplicate parameter names and `snake_case`
    bindings read under other names) and generalizes the start-node read from
    a single character to the first whitespace-separated token, so multi-char
    node names work too.
    """
    with open(path) as f:
        start_node = f.read().split()[0]
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        # 10000 acts as "infinity"; if every neighbour is already visited the
        # previous best_node is reused and the sentinel is corrected below.
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    # Replace the sentinel cost of the final hop with the real closing edge.
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of `solution` (endpoints fixed), each
    candidate suffixed with its total tour distance, sorted by that distance.

    Fixes the garbled original: the sort key lambda took a parameter named `a`
    but read `x` (a NameError at sort time), and several `snake_case` bindings
    never reached the names later lines read.
    """
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            # Total distance of the swapped tour.
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of length `size`.

    Each iteration takes the best non-tabu neighbour (by the 2-swap move that
    produced it) and tracks the best tour seen. Returns (best_solution, best_cost).
    Fixes the garbled original: duplicate parameter names (a SyntaxError) and
    `snake_case` bindings that never reached the names later lines read.
    """
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            # Identify the swapped pair that produced this neighbour.
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]  # drop the appended distance
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # Move is tabu — try the next-best neighbour.
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    """Entry point: build the adjacency dict, a greedy first tour, then run
    tabu search with the CLI-provided iteration count and tabu-list size.

    Renamed from the garbled `__UpperCamelCase` so the `main(...)` call in the
    __main__ guard resolves, with results bound to the names the print reads.
    """
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours)
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    # Build the CLI: data file, iteration budget, and tabu-list size.
    # Fixes the garbled original, which bound the parser to `_lowercase` while
    # the following lines read `parser`.
    parser = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 44
| 1
|
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _lowercase ( __a ):
    """PipelineTool that captions an image with BLIP
    (Salesforce/blip-image-captioning-base).

    NOTE(review): the class attribute names are garbled — upstream these are
    default_checkpoint, description, name, model_class, inputs, outputs, and
    each `_UpperCAmelCase` assignment here overwrites the previous one. The
    __init__ signature repeats `A__` for *args/**kwargs (a SyntaxError).
    """
    _UpperCAmelCase = '''Salesforce/blip-image-captioning-base'''
    _UpperCAmelCase = (
        '''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
        '''image to caption, and returns a text that contains the description in English.'''
    )
    _UpperCAmelCase = '''image_captioner'''
    _UpperCAmelCase = AutoModelForVisionaSeq
    _UpperCAmelCase = ['''image''']
    _UpperCAmelCase = ['''text''']
    def __init__( self , *A__ , **A__ ) -> Optional[Any]:
        """Require the vision backend before the normal PipelineTool init."""
        requires_backends(self , ['''vision'''] )
        super().__init__(*A__ , **A__ )
    def UpperCamelCase ( self , A__ ) -> List[Any]:
        """encode: preprocess the input image into PyTorch model tensors."""
        return self.pre_processor(images=A__ , return_tensors='''pt''' )
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        """forward: generate caption token ids with the BLIP model."""
        return self.model.generate(**A__ )
    def UpperCamelCase ( self , A__ ) -> List[Any]:
        """decode: token ids -> stripped caption string."""
        return self.pre_processor.batch_decode(A__ , skip_special_tokens=A__ )[0].strip()
| 44
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
    # Spectrogram diffusion needs both transformers and torch.
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Fall back to dummy placeholder objects so importing this package still
    # succeeds when the optional dependencies are missing.
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )
try:
    # The MIDI helpers additionally require note_seq.
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two equal-length binary strings that differ in at most one position.

    Returns the merged string with the differing position replaced by '_' (or the
    string unchanged if identical), and False when they differ in more than one
    position. Bug fix: the original had both parameters mangled to `a` (a
    SyntaxError) and compared a list with itself, so `count` could never advance.
    """
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    return "".join(list1)
def check(binary: list[str]) -> list[str]:
    """Iteratively combine implicants until no pair merges, collecting prime implicants.

    Bug fix: every local was mangled to `snake_case` and the def name no longer matched
    its call site (`check(...)` in main); restored from the Quine–McCluskey algorithm
    structure visible in the original statements.
    """
    pi = []
    while True:
        # '$' marks "not combined with anything this round".
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    """Convert each minterm to a fixed-width binary string of `no_of_variable` bits.

    Bug fix: both parameters were mangled to `a` (SyntaxError) and all locals collapsed
    to `snake_case`; names restored. Note the upstream signature accepts floats, in which
    case `minterm % 2` yields float-formatted "bits" — preserved as-is.
    """
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            # Prepend the least-significant bit so the string reads MSB-first.
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two strings differ in exactly `count` positions.

    Used to test whether a prime implicant (with '_' wildcards) covers a minterm.
    Bug fix: duplicate mangled parameters (`a , a , a`, a SyntaxError) and a
    self-comparison of one list restored to the intended two-string comparison.
    """
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select essential prime implicants from the coverage chart, then greedily cover the rest.

    `chart[i][j] == 1` means prime implicant i covers minterm j. The chart is mutated:
    covered columns are zeroed out. Bug fix: all locals were mangled to `snake_case`
    (every assignment clobbered the previous one); names restored from the visible
    control-flow structure.
    """
    temp = []
    select = [0] * len(chart)
    # Phase 1: a column covered by exactly one row marks that row essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    # Phase 2: take every essential implicant and zero out the columns it covers.
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Phase 3: greedily pick the implicant covering the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] = 1 iff prime implicant i covers minterm j.

    An implicant with k '_' wildcards covers a minterm when they differ in exactly k
    positions. Bug fix: mangled duplicate params and collapsed locals restored; the def
    name now matches its call site in main.
    """
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    """Interactive driver: read variable count and minterms, print prime and essential implicants.

    Bug fix: the def was mangled to `__UpperCamelCase` while the __main__ guard calls
    `main()`; locals collapsed to `snake_case` were restored to distinct names.
    """
    no_of_variable = int(input('''Enter the no. of variables\n''' ) )
    minterms = [
        float(x)
        for x in input(
            '''Enter the decimal representation of Minterms \'Spaces Separated\'\n''' ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('''Prime Implicants are:''' )
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('''Essential Prime Implicants are:''' )
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 44
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( ProcessorMixin ):
    """Whisper processor wrapping a feature extractor and a tokenizer behind one API.

    NOTE(review): the base class was mangled to the undefined `__a`; `ProcessorMixin`
    is imported at the top of this file and matches the `feature_extractor_class` /
    `tokenizer_class` contract. The two class attributes were both mangled to
    `_UpperCAmelCase` (the first was shadowed) and all locals collapsed to `snake_case`,
    which made __call__ clobber `audio` with `sampling_rate` and `text`.
    """

    feature_extractor_class = '''WhisperFeatureExtractor'''
    tokenizer_class = '''WhisperTokenizer'''

    def __init__( self , feature_extractor , tokenizer ) -> None:
        super().__init__(feature_extractor , tokenizer )
        # Default target for __call__ outside a target-processor context manager.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids( self , task=None , language=None , no_timestamps=True ):
        return self.tokenizer.get_decoder_prompt_ids(task=task , language=language , no_timestamps=no_timestamps )

    def __call__( self , *args , **kwargs ):
        """Forward audio to the feature extractor and/or text to the tokenizer.

        Returns feature-extractor inputs, tokenizer encodings, or (when both are given)
        the inputs with tokenized text attached as "labels".
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )

        audio = kwargs.pop('''audio''' , None )
        sampling_rate = kwargs.pop('''sampling_rate''' , None )
        text = kwargs.pop('''text''' , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )

        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )

    def get_prompt_ids( self , text , return_tensors="np" ):
        return self.tokenizer.get_prompt_ids(text , return_tensors=return_tensors )
| 44
| 1
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the per-task example directories importable so each script module resolves.
# Bug fix: the list was assigned to the mangled name `_lowercase` while being used as
# `SRC_DIRS` two lines later (NameError).
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        'text-classification',
        'language-modeling',
        'summarization',
        'token-classification',
        'question-answering',
    ]
]
sys.path.extend(SRC_DIRS)


if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    # presumably run_t5_mlm_flax before mangling — kept as-is to match the module
    # referenced elsewhere in this file. TODO confirm the script filename.
    import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)

# Bug fix: the root logger was assigned to the mangled name `_lowercase` while later
# code uses `logger` (NameError).
logger = logging.getLogger()
def get_setup_file():
    """Return the value of the ``-f`` command-line flag (the setup file path).

    Bug fix: the parser local was mangled to `snake_case`, so `parser.add_argument`
    raised NameError; the def name (mangled `__UpperCamelCase`, shadowed by the next
    def) is restored to the conventional example-test helper name.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load ``{split}_results.json`` from *output_dir* and return the parsed dict.

    Raises ValueError when the file does not exist. Bug fix: parameters were mangled
    (`a`) and the def name no longer matched the `get_results(...)` call sites below.
    """
    path = os.path.join(output_dir, f"""{split}_results.json""" )
    if os.path.exists(path):
        with open(path, '''r''' ) as f:
            return json.load(f)
    raise ValueError(f"""can't find {path}""" )
# Mirror log output to stdout so pytest captures it.
# Bug fix: the handler was assigned to the mangled name `_lowercase` while being
# registered as `stream_handler` (NameError).
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase ( TestCasePlus ):
    """End-to-end smoke tests launching each Flax example script on tiny fixtures.

    NOTE(review): the base class was mangled to the undefined `__a`; `TestCasePlus`
    is imported at the top of this file and provides `get_auto_remove_tmp_dir`.
    Every method was mangled to the same name `UpperCamelCase` (so unittest discovery
    would run none of them and all but the last were shadowed), locals collapsed to
    `snake_case` (f-strings referenced the undefined `tmp_dir`), and
    `patch.object(sys, "argv", testargs)` had both arguments mangled to `A__`.
    """

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['''eval_accuracy'''], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['''eval_perplexity'''], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split='''test''')
            self.assertGreaterEqual(result['''test_rouge1'''], 10)
            self.assertGreaterEqual(result['''test_rouge2'''], 2)
            self.assertGreaterEqual(result['''test_rougeL'''], 7)
            self.assertGreaterEqual(result['''test_rougeLsum'''], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result['''eval_perplexity'''], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, '''argv''', testargs):
            # module name kept as imported at the top of this file (mangled t5 -> ta).
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['''eval_accuracy'''], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['''eval_accuracy'''], 0.75)
            self.assertGreaterEqual(result['''eval_f1'''], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = F"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, '''argv''', testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result['''eval_f1'''], 30)
            self.assertGreaterEqual(result['''eval_exact'''], 30)
| 44
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding branches of the MGP-STR head.

    Bug fix: the class name was mangled to `_lowercase` while later code references
    `DecodeType.CHARACTER` etc.; the base was mangled to the undefined `__a` —
    `ExplicitEnum` is imported at the top of this file. The three members were all
    mangled to `_UpperCAmelCase`, so only the last survived.
    """

    CHARACTER = '''char'''
    BPE = '''bpe'''
    WORDPIECE = '''wp'''
# presumably named SUPPORTED_ANNOTATION_FORMATS before mangling — TODO confirm upstream name.
SUPPORTED_ANNOTATION_FORMATS = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowercase ( ProcessorMixin ):
    """MGP-STR processor: a ViT image processor plus char/BPE/wordpiece tokenizers.

    NOTE(review): the base class was mangled to the undefined `__a`; `ProcessorMixin`
    is imported at the top of this file and matches the `attributes` contract. The
    three class attributes were all mangled to `_UpperCAmelCase` (first two shadowed),
    `__call__` had three duplicate `A__` parameters (a SyntaxError), and all locals
    collapsed to `snake_case`.
    """

    attributes = ['''image_processor''', '''char_tokenizer''']
    image_processor_class = '''ViTImageProcessor'''
    char_tokenizer_class = '''MgpstrTokenizer'''

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('''gpt2''' )
        self.wp_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''' )

        super().__init__(image_processor , tokenizer )

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Process images with the image processor and/or text with the char tokenizer."""
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )

        if images is not None:
            inputs = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None:
            encodings = self.char_tokenizer(text , return_tensors=return_tensors , **kwargs )

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs['''labels'''] = encodings['''input_ids''']
            return inputs

    def batch_decode( self , sequences ):
        """Decode char/BPE/wordpiece logit triples and keep the highest-confidence string per sample."""
        char_preds , bpe_preds , wp_preds = sequences
        batch_size = char_preds.size(0 )

        char_strs , char_scores = self._decode_helper(char_preds , '''char''' )
        bpe_strs , bpe_scores = self._decode_helper(bpe_preds , '''bpe''' )
        wp_strs , wp_scores = self._decode_helper(wp_preds , '''wp''' )

        final_strs = []
        final_scores = []
        for i in range(batch_size ):
            scores = [char_scores[i], bpe_scores[i], wp_scores[i]]
            strs = [char_strs[i], bpe_strs[i], wp_strs[i]]
            max_score_index = scores.index(max(scores ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )

        out = {}
        out['''generated_text'''] = final_strs
        out['''scores'''] = final_scores
        out['''char_preds'''] = char_strs
        out['''bpe_preds'''] = bpe_strs
        out['''wp_preds'''] = wp_strs
        return out

    def _decode_helper( self , pred_logits , format ):
        """Greedy-decode one head's logits; truncate at its EOS marker and score by cumprod of max probs."""
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '''[s]'''
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '''#'''
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '''[SEP]'''
        else:
            raise ValueError(F"""Format {format} is not supported.""" )

        dec_strs , conf_scores = [], []
        batch_size = pred_logits.size(0 )
        batch_max_length = pred_logits.size(1 )
        # Drop the first position (the BOS slot) after taking the argmax token per step.
        _ , preds_index = pred_logits.topk(1 , dim=-1 , largest=True , sorted=True )
        preds_index = preds_index.view(-1 , batch_max_length )[:, 1:]
        preds_str = decoder(preds_index )
        preds_max_prob , _ = torch.nn.functional.softmax(pred_logits , dim=2 ).max(dim=2 )
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size ):
            pred_eos = preds_str[index].find(eos_str )
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token ) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred )
            conf_scores.append(confidence_score )

        return dec_strs, conf_scores

    def char_decode( self , sequences ):
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(sequences )]
        return decode_strs

    def bpe_decode( self , sequences ):
        return self.bpe_tokenizer.batch_decode(sequences )

    def wp_decode( self , sequences ):
        decode_strs = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(sequences )]
        return decode_strs
| 44
| 1
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
# Bug fix: the module logger was assigned to the mangled name `_lowercase` while the
# conversion function below uses `logger` (NameError).
logger = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite '<block>.<layer>.<original_name>' inside *key* as 'block.<block-offset>.<layer>.<new_name>'.

    Bug fix: all four parameters were mangled to `a` (a SyntaxError) and every local
    collapsed to `snake_case`, so the function returned its last temporary instead of
    the rewritten key; names restored from the visible statement structure.
    """
    to_find = original_name.split('''.''' )[0]
    key_list = key.split('''.''' )
    orig_block_num = int(key_list[key_list.index(to_find ) - 2] )
    layer_num = int(key_list[key_list.index(to_find ) - 1] )
    new_block_num = orig_block_num - offset

    key = key.replace(f"""{orig_block_num}.{layer_num}.{original_name}""" , f"""block.{new_block_num}.{layer_num}.{new_name}""" )
    return key
def rename_keys(state_dict):
    """Map original PoolFormer ('network.*') checkpoint keys to HF PoolFormer module names.

    Bug fix: the def name no longer matched its call site (`rename_keys(...)`), the
    counters and the output dict were all mangled to `snake_case`, and every rewritten
    key was discarded instead of accumulating into the new state dict.
    """
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network''' ):
            key = key.replace('''network''' , '''poolformer.encoder''' )
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''' ) and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''' )]
            key = key.replace(to_replace , f"""patch_embeddings.{total_embed_found}.""" )
            key = key.replace('''proj''' , '''projection''' )
            if key.endswith('''bias''' ):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc1''' , '''output.conv1''' )
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''mlp.fc2''' , '''output.conv2''' )
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm1''' , '''before_norm''' )
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''norm2''' , '''after_norm''' )
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_1''' , '''layer_scale_1''' )
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , '''layer_scale_2''' , '''layer_scale_2''' )
        if "head" in key:
            key = key.replace('''head''' , '''classifier''' )
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO cats test image and return it as a PIL Image.

    Bug fix: the URL local was mangled to `snake_case`, so `requests.get(a, ...)`
    referenced an undefined name, and `stream=a` should be `stream=True`.
    """
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url , stream=True ).raw )
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to HF format, verify logits, and save.

    Bug fix: every local was mangled to `snake_case`, so the config attributes
    (depths, hidden sizes, crop pct, ...), the renamed state dict, and the expected
    logit slice were all clobbered; names restored from the visible statement order.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    num_labels = 1000  # presumably consumed via the id2label map below — TODO confirm
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""" )

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors='''pt''' ).pixel_values

    logger.info(f"""Converting model {model_name}...""" )

    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device('''cpu''' ) )
    # rename keys
    state_dict = rename_keys(state_dict )
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct )
    pixel_values = image_processor(images=prepare_img() , return_tensors='''pt''' ).pixel_values

    # forward pass
    outputs = model(pixel_values )
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869] )
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045] )
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898] )
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668] )
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423] )
    else:
        raise ValueError(f"""Size {size} not supported""" )

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2 )

    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # Bug fix: the parser was assigned to the mangled name `_lowercase` while
    # `parser.add_argument` / `args.model_name` referenced undefined names.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )

    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): mangled tuple assignment — all three targets collapsed to the same name
# `_lowercase`, so only one module-level flag survives. Presumably these were three
# distinct lazily-probed support flags; recover the original names from upstream before
# relying on them. TODO confirm.
_lowercase , _lowercase , _lowercase = False, False, False
@dataclass
class Audio:
    """Audio feature stored in Arrow as a struct ``{"bytes": binary, "path": string}``.

    - sampling_rate: resample decoded audio to this rate when set.
    - mono: down-mix decoded audio to a single channel.
    - decode: when False, accessors return the raw ``{"bytes", "path"}`` dict.

    NOTE(review): the class name was mangled to `_lowercase` while `cast_storage`
    below calls `Audio().encode_example(...)` and the `_type` field default is
    "Audio" — the name is restored accordingly. Field/method names were recovered
    from the visible statement structure.
    """

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _type: str = field(default='''Audio''' , init=__a , repr=__a )

    def __call__( self ):
        return self.pa_type

    def encode_example( self , value ) -> dict:
        """Encode a path, raw bytes, or an {"array", "sampling_rate"} dict into storage form."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(value , str ):
            return {"bytes": None, "path": value}
        elif isinstance(value , bytes ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value['''bytes'''] , dtype=np.int16 ).astype(np.float32 ) / 32767
                else:
                    bytes_value = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.float32 ) / 32767
                buffer = BytesIO(bytes() )
                sf.write(buffer , bytes_value , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )

    def decode_example( self , value , token_per_repo_id = None ) -> dict:
        """Decode a storage dict into {"path", "array", "sampling_rate"}, resampling as configured."""
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )

        path , file = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )

        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err

        audio_format = xsplitext(path )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )

        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split('''::''' )[-1]
            try:
                repo_id = string_to_dict(source_url , config.HUB_DATASETS_URL )['''repo_id''']
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path , '''rb''' , use_auth_token=use_auth_token ) as f:
                array , sampling_rate = sf.read(f )
        else:
            array , sampling_rate = sf.read(file )

        array = array.T
        if self.mono:
            array = librosa.to_mono(array )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array , orig_sr=sampling_rate , target_sr=self.sampling_rate )
            sampling_rate = self.sampling_rate

        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into {"bytes", "path"} Value features; invalid while decodable."""
        from .features import Value

        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''' )
        return {
            "bytes": Value('''binary''' ),
            "path": Value('''string''' ),
        }

    def cast_storage( self , storage ) -> pa.StructArray:
        """Cast string/binary/struct Arrow storage into the canonical bytes+path struct."""
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
            storage = pa.array([Audio().encode_example(x ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                bytes_array = storage.field('''bytes''' )
            else:
                bytes_array = pa.array([None] * len(storage ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                path_array = storage.field('''path''' )
            else:
                path_array = pa.array([None] * len(storage ) , type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        return array_cast(storage , self.pa_type )

    def embed_storage( self , storage ) -> pa.StructArray:
        """Embed file contents as bytes (keeping only the file basename in "path")."""
        @no_op_if_value_is_null
        def path_to_bytes(path ):
            with xopen(path , '''rb''' ) as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        storage = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(storage , self.pa_type )
| 44
| 1
|
'''simple docstring'''
def greatest_common_divisor(x: int, y: int) -> int:
    """Return gcd(x, y) via Euclid's algorithm.

    Bug fix: both parameters were mangled to `a` (a SyntaxError) while the body used
    `x`/`y`, and the recursive call's first argument was mangled.
    """
    return x if y == 0 else greatest_common_divisor(y, x % y)
def lcm(x: int, y: int) -> int:
    """Return the least common multiple of x and y.

    Bug fix: duplicate mangled parameters restored; the def name now matches the
    `lcm(...)` call site in the solver below.
    """
    return (x * y) // greatest_common_divisor(x, y)
def solution(n: int = 20) -> int:
    """Return the smallest number evenly divisible by every integer in 1..n (Project Euler 5).

    Bug fix: the accumulator and loop arguments were mangled to `snake_case`/`a`;
    the def name now matches the `solution()` call in the __main__ guard.
    """
    g = 1
    for i in range(1, n + 1):
        # Running LCM of 1..i.
        g = lcm(g, i)
    return g
if __name__ == "__main__":
    # Entry point: print the answer using the f-string self-documenting `=` format
    # (Python 3.8+). NOTE(review): `solution` must match the solver def's name above —
    # the defs in this file were name-mangled; confirm.
    print(f'{solution() = }')
| 44
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
    from PIL import Image
else:
    class Image:
        """Stand-in so annotations referencing `Image` still resolve without PIL installed.

        NOTE(review): the stub's class/method names were mangled (`_lowercase` /
        `UpperCamelCase`); restored so they shadow the PIL API they replace.
        """

        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image) -> str:
    """Return the hex MD5 digest of a PIL image's raw pixel bytes.

    Bug fix: `hashlib.mda` was a mangled `hashlib.md5`, and the def name no longer
    matched its `hashimage(...)` call site below.
    """
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    """Pipeline tests for depth estimation.

    NOTE(review): every method was mangled to the same name `UpperCamelCase`, so
    unittest discovery would never run the `test_*` methods and the pipeline-test
    harness hooks (`get_test_pipeline` / `run_pipeline_test`) were unreachable;
    method and local names restored.
    """

    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''', '''image''', split='''test''')
        outputs = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png'''),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ])
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
                {'''predicted_depth''': ANY(torch.Tensor), '''depth''': ANY(Image.Image)},
            ], outputs, )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''')
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''', model=model_id)
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''')
        outputs['''depth'''] = hashimage(outputs['''depth'''])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''')
| 44
| 1
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _lowercase ( __a ):
    """Configuration for a CodeGen model (GPT-J-style decoder with rotary embeddings).

    NOTE(review): the original block named every ``__init__`` parameter ``A__``
    (duplicate argument names — a SyntaxError) and bound every attribute to the
    placeholder ``snake_case``.  The real parameter/attribute names are restored
    here from the attribute reads elsewhere in this file (``n_layer``,
    ``n_head``, ``n_positions``, ...) and the known defaults; ``model_type`` and
    ``attribute_map`` are the names the configuration machinery looks up.
    """

    model_type = '''codegen'''
    attribute_map = {
        '''max_position_embeddings''': '''n_positions''',
        '''hidden_size''': '''n_embd''',
        '''num_attention_heads''': '''n_head''',
        '''num_hidden_layers''': '''n_layer''',
    }

    def __init__(
        self,
        vocab_size=50400,        # BPE vocabulary size
        n_positions=2048,        # max sequence length the model can attend over
        n_ctx=2048,              # context window (kept for backward compat)
        n_embd=4096,             # hidden size
        n_layer=28,              # number of transformer blocks
        n_head=16,               # attention heads per block
        rotary_dim=64,           # dims receiving rotary position embeddings
        n_inner=None,            # MLP inner dim; None -> 4 * n_embd downstream
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ) -> None:
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        # The base class consumes the special-token ids and the tying flag.
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class _lowercase ( __a ):
    # ONNX export configuration for CodeGen (decoder-only, with optional
    # past-key-values support inherited from OnnxConfigWithPast).
    # NOTE(review): this block is machine-mangled — every parameter is the
    # placeholder ``A__`` (duplicate argument names in a signature are a
    # SyntaxError) and every assignment binds ``snake_case`` while later lines
    # read the intended names (``common_inputs``, ``ordered_inputs``, ``batch``,
    # ``seqlen``...).  Left byte-identical; de-mangle before use.
    def __init__( self , A__ , A__ = "default" , A__ = None , A__ = False , ) -> str:
        # Forward model config / task / patching specs / use_past to the base.
        super().__init__(A__ , task=A__ , patching_specs=A__ , use_past=A__ )
        if not getattr(self._config , '''pad_token_id''' , A__ ):
            # TODO: how to do that better?
            # Default the pad token to 0 when the wrapped config has none.
            snake_case = 0
    @property
    def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        # Dynamic-axis specification for the exported graph's inputs.
        snake_case = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
        if self.use_past:
            self.fill_with_past_key_values_(A__ , direction='''inputs''' )
            snake_case = {0: '''batch''', 1: '''past_sequence + sequence'''}
        else:
            snake_case = {0: '''batch''', 1: '''sequence'''}
        return common_inputs
    @property
    def UpperCamelCase ( self ) -> int:
        # Number of transformer blocks (sizes the past_key_values list).
        return self._config.n_layer
    @property
    def UpperCamelCase ( self ) -> int:
        # Attention heads per block.
        return self._config.n_head
    def UpperCamelCase ( self , A__ , A__ = -1 , A__ = -1 , A__ = False , A__ = None , ) -> Mapping[str, Any]:
        # Build dummy inputs for tracing; extends the base dummies with
        # zero-filled past_key_values and a correspondingly longer mask.
        snake_case = super(A__ , self ).generate_dummy_inputs(
            A__ , batch_size=A__ , seq_length=A__ , is_pair=A__ , framework=A__ )
        # We need to order the input in the way they appears in the forward()
        snake_case = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
                snake_case , snake_case = common_inputs['''input_ids'''].shape
                # Not using the same length for past_key_values
                snake_case = seqlen + 2
                snake_case = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                snake_case = [
                    (torch.zeros(A__ ), torch.zeros(A__ )) for _ in range(self.num_layers )
                ]
        snake_case = common_inputs['''attention_mask''']
        if self.use_past:
            snake_case = ordered_inputs['''attention_mask'''].dtype
            snake_case = torch.cat(
                [ordered_inputs['''attention_mask'''], torch.ones(A__ , A__ , dtype=A__ )] , dim=1 )
        return ordered_inputs
    @property
    def UpperCamelCase ( self ) -> int:
        # Default ONNX opset version targeted by this exporter.
        return 13
| 44
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def remove_ignore_keys_(state_dict: dict) -> None:
    """Drop fairseq bookkeeping keys that have no HF equivalent (in place).

    NOTE(review): restored from the mangled original, whose parameter was ``a``
    and whose list was bound to the placeholder ``snake_case`` while the loop
    read ``ignore_keys``.
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        # pop with a default so checkpoints missing a key are tolerated
        state_dict.pop(k, None)
def rename_keys(s_dict: dict) -> None:
    """Rename fairseq Speech2Text keys to their HF names, in place.

    ``transformer_layers`` -> ``layers`` and ``subsample`` -> ``conv``.
    NOTE(review): the mangled original popped each matching key but lost the
    assignment target, so the renamed entries were never written back.
    """
    keys = list(s_dict.keys())  # snapshot: we mutate the dict while looping
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)
def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    """Build a bias-free ``nn.Linear`` that shares its weight tensor with *emb*.

    Used to tie the LM head to the decoder input embeddings: the returned layer
    aliases ``emb.weight.data`` rather than copying it.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share storage -> tied weights
    return lin_layer
# NOTE(review): this converter is machine-mangled — the signature uses the
# duplicate placeholder parameter ``a`` (a SyntaxError) and every assignment
# binds ``snake_case`` while later lines read the real names (``mam_aaa``,
# ``state_dict``, ``args``, ``model``...).  Left byte-identical; it converts a
# fairseq Speech2Text checkpoint into an HF SpeechaTextForConditionalGeneration.
def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple:
    # Load the raw fairseq checkpoint on CPU and split args / weights.
    snake_case = torch.load(a , map_location='''cpu''' )
    snake_case = mam_aaa['''args''']
    snake_case = mam_aaa['''model''']
    snake_case = state_dict['''decoder.output_projection.weight''']
    # Strip bookkeeping keys and rename fairseq keys to HF names.
    remove_ignore_keys_(a )
    rename_keys(a )
    snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    snake_case = args.share_decoder_input_output_embed
    snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )]
    # Mirror every fairseq hyper-parameter into the HF config.
    snake_case = SpeechaTextConfig(
        vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , )
    snake_case = SpeechaTextForConditionalGeneration(a )
    snake_case , snake_case = model.model.load_state_dict(a , strict=a )
    # Only the (recomputable) sinusoidal position weights may be missing.
    if len(a ) > 0 and not set(a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""" )
    # Tie or copy the LM head, then write the converted model to disk.
    if tie_embeds:
        snake_case = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case = lm_head_weights
    model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point: read the fairseq .pt path and the HF output directory.
    # NOTE(review): both results are bound to ``_lowercase`` while the next
    # lines read ``parser`` / ``args`` — mangled names, de-mangle before use.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two equal-length vectors.

    NOTE(review): the mangled original named both parameters ``a`` (a
    SyntaxError) and the function ``__UpperCamelCase``; the name ``euclidean``
    is restored from the call sites in ``similarity_search``.

    >>> euclidean(np.array([0, 0]), np.array([3, 4]))
    5.0
    """
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def __UpperCamelCase ( a : np.ndarray , a : np.ndarray ) ->list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
snake_case = (
'''Wrong input data\'s dimensions... '''
f"""dataset : {dataset.ndim}, value_array : {value_array.ndim}"""
)
raise ValueError(a )
try:
if dataset.shape[1] != value_array.shape[1]:
snake_case = (
'''Wrong input data\'s shape... '''
f"""dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"""
)
raise ValueError(a )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError('''Wrong shape''' )
if dataset.dtype != value_array.dtype:
snake_case = (
'''Input data have different datatype... '''
f"""dataset : {dataset.dtype}, value_array : {value_array.dtype}"""
)
raise TypeError(a )
snake_case = []
for value in value_array:
snake_case = euclidean(a , dataset[0] )
snake_case = dataset[0].tolist()
for dataset_value in dataset[1:]:
snake_case = euclidean(a , a )
if dist > temp_dist:
snake_case = temp_dist
snake_case = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors (dot product over the
    product of their L2 norms).

    NOTE(review): parameter names restored — the mangled original named both
    parameters ``a``, which is a SyntaxError.
    """
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
    import doctest
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase ( metaclass=__a ):
    """Import-time placeholder for the MIDI/audio pipeline: every entry point
    raises a helpful ImportError when `transformers`, `torch` or `note_seq`
    is unavailable.

    NOTE(review): the mangled original declared ``*A__, **A__`` (duplicate
    argument names — a SyntaxError) and stored the backend list under
    ``_UpperCAmelCase``; the ``DummyObject`` metaclass looks the list up under
    ``_backends``, and the alternate constructors are conventionally named
    ``from_config`` / ``from_pretrained`` — all restored here.
    """

    _backends = ['''transformers''', '''torch''', '''note_seq''']

    def __init__(self, *args, **kwargs) -> None:
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
| 44
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def create_rename_keys(config, vqa_model=False, nlvr_model=False, irtr_model=False):
    """Return (old_key, new_key) pairs mapping original ViLT checkpoint names to
    HF ViltModel names; classifier-head pairs are added per model flavour.

    NOTE(review): reconstructed from the mangled original — the accumulator was
    bound to ``snake_case`` while every later line appended to ``rename_keys``,
    and all four parameters were named ``a``.  ``irtr_model`` is accepted (the
    caller passes four flags) but needs no extra head renames.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"transformer.blocks.{i}.norm1.weight", f"vilt.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm1.bias", f"vilt.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.weight", f"vilt.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"transformer.blocks.{i}.attn.proj.bias", f"vilt.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append((f"transformer.blocks.{i}.norm2.weight", f"vilt.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"transformer.blocks.{i}.norm2.bias", f"vilt.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"transformer.blocks.{i}.mlp.fc1.weight", f"vilt.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc1.bias", f"vilt.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.weight", f"vilt.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"transformer.blocks.{i}.mlp.fc2.bias", f"vilt.encoder.layer.{i}.output.dense.bias"))
    # embeddings
    rename_keys.extend(
        [
            # text embeddings
            ("text_embeddings.word_embeddings.weight", "vilt.embeddings.text_embeddings.word_embeddings.weight"),
            (
                "text_embeddings.position_embeddings.weight",
                "vilt.embeddings.text_embeddings.position_embeddings.weight",
            ),
            ("text_embeddings.position_ids", "vilt.embeddings.text_embeddings.position_ids"),
            (
                "text_embeddings.token_type_embeddings.weight",
                "vilt.embeddings.text_embeddings.token_type_embeddings.weight",
            ),
            ("text_embeddings.LayerNorm.weight", "vilt.embeddings.text_embeddings.LayerNorm.weight"),
            ("text_embeddings.LayerNorm.bias", "vilt.embeddings.text_embeddings.LayerNorm.bias"),
            # patch embeddings
            ("transformer.cls_token", "vilt.embeddings.cls_token"),
            ("transformer.patch_embed.proj.weight", "vilt.embeddings.patch_embeddings.projection.weight"),
            ("transformer.patch_embed.proj.bias", "vilt.embeddings.patch_embeddings.projection.bias"),
            ("transformer.pos_embed", "vilt.embeddings.position_embeddings"),
            # token type embeddings
            ("token_type_embeddings.weight", "vilt.embeddings.token_type_embeddings.weight"),
        ]
    )
    # final layernorm + pooler
    rename_keys.extend(
        [
            ("transformer.norm.weight", "vilt.layernorm.weight"),
            ("transformer.norm.bias", "vilt.layernorm.bias"),
            ("pooler.dense.weight", "vilt.pooler.dense.weight"),
            ("pooler.dense.bias", "vilt.pooler.dense.bias"),
        ]
    )
    # classifier head(s)
    if vqa_model:
        # classification head
        rename_keys.extend(
            [
                ("vqa_classifier.0.weight", "classifier.0.weight"),
                ("vqa_classifier.0.bias", "classifier.0.bias"),
                ("vqa_classifier.1.weight", "classifier.1.weight"),
                ("vqa_classifier.1.bias", "classifier.1.bias"),
                ("vqa_classifier.3.weight", "classifier.3.weight"),
                ("vqa_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    elif nlvr_model:
        # classification head
        rename_keys.extend(
            [
                ("nlvr2_classifier.0.weight", "classifier.0.weight"),
                ("nlvr2_classifier.0.bias", "classifier.0.bias"),
                ("nlvr2_classifier.1.weight", "classifier.1.weight"),
                ("nlvr2_classifier.1.bias", "classifier.1.bias"),
                ("nlvr2_classifier.3.weight", "classifier.3.weight"),
                ("nlvr2_classifier.3.bias", "classifier.3.bias"),
            ]
        )
    else:
        pass
    return rename_keys
def __UpperCamelCase ( a : int , a : Any ) ->str:
for i in range(config.num_hidden_layers ):
snake_case = '''vilt.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" )
snake_case = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
snake_case = in_proj_weight[
: config.hidden_size, :
]
snake_case = in_proj_bias[: config.hidden_size]
snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case = in_proj_weight[
-config.hidden_size :, :
]
snake_case = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the original classification-head weights from the checkpoint, in place.

    NOTE(review): name and ``ignore_keys`` binding restored from the mangled
    original; pops with a default so missing keys are tolerated.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move ``dct[old]`` to ``dct[new]``, in place (raises KeyError if absent)."""
    val = dct.pop(old)
    dct[new] = val
# NOTE(review): this converter is machine-mangled — both parameters are the
# placeholder ``a`` (duplicate names are a SyntaxError) and assignments bind
# ``snake_case`` while later lines read the intended names (``config``,
# ``model``, ``state_dict``, ``outputs``...).  Left byte-identical; it converts
# an original ViLT checkpoint (VQA / NLVR2 / IRTR / MLM+ITM flavour, chosen
# from the URL) into the matching HF Vilt* model and verifies a forward pass.
@torch.no_grad()
def __UpperCamelCase ( a : List[str] , a : List[Any] ) ->Optional[Any]:
    # Choose the target architecture and labels from the checkpoint URL.
    snake_case = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=a )
    snake_case = False
    snake_case = False
    snake_case = False
    snake_case = False
    if "vqa" in checkpoint_url:
        snake_case = True
        snake_case = 3129
        snake_case = '''huggingface/label-files'''
        snake_case = '''vqa2-id2label.json'''
        snake_case = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
        snake_case = {int(a ): v for k, v in idalabel.items()}
        snake_case = idalabel
        snake_case = {v: k for k, v in idalabel.items()}
        snake_case = ViltForQuestionAnswering(a )
    elif "nlvr" in checkpoint_url:
        snake_case = True
        snake_case = 2
        snake_case = {0: '''False''', 1: '''True'''}
        snake_case = {v: k for k, v in config.idalabel.items()}
        snake_case = 3
        snake_case = ViltForImagesAndTextClassification(a )
    elif "irtr" in checkpoint_url:
        snake_case = True
        snake_case = ViltForImageAndTextRetrieval(a )
    elif "mlm_itm" in checkpoint_url:
        snake_case = True
        snake_case = ViltForMaskedLM(a )
    else:
        raise ValueError('''Unknown model type''' )
    # load state_dict of original model, remove and rename some keys
    snake_case = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )['''state_dict''']
    snake_case = create_rename_keys(a , a , a , a )
    for src, dest in rename_keys:
        rename_key(a , a , a )
    read_in_q_k_v(a , a )
    if mlm_model or irtr_model:
        snake_case = ['''itm_score.fc.weight''', '''itm_score.fc.bias''']
        for k in ignore_keys:
            state_dict.pop(a , a )
    # load state dict into HuggingFace model
    model.eval()
    if mlm_model:
        # MLM head decoder bias is recreated by HF, so it may be missing here.
        snake_case , snake_case = model.load_state_dict(a , strict=a )
        assert missing_keys == ["mlm_score.decoder.bias"]
    else:
        model.load_state_dict(a )
    # Define processor
    snake_case = ViltImageProcessor(size=384 )
    snake_case = BertTokenizer.from_pretrained('''bert-base-uncased''' )
    snake_case = ViltProcessor(a , a )
    # Forward pass on example inputs (image + text)
    if nlvr_model:
        snake_case = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw )
        snake_case = Image.open(requests.get('''https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg''' , stream=a ).raw )
        snake_case = (
            '''The left image contains twice the number of dogs as the right image, and at least two dogs in total are'''
            ''' standing.'''
        )
        snake_case = processor(a , a , return_tensors='''pt''' )
        snake_case = processor(a , a , return_tensors='''pt''' )
        snake_case = model(
            input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
    else:
        snake_case = Image.open(requests.get('''http://images.cocodataset.org/val2017/000000039769.jpg''' , stream=a ).raw )
        if mlm_model:
            snake_case = '''a bunch of [MASK] laying on a [MASK].'''
        else:
            snake_case = '''How many cats are there?'''
        snake_case = processor(a , a , return_tensors='''pt''' )
        snake_case = model(**a )
    # Verify outputs
    if mlm_model:
        snake_case = torch.Size([1, 11, 3_0522] )
        snake_case = torch.tensor([-12.5061, -12.5123, -12.5174] )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
        # verify masked token prediction equals "cats"
        snake_case = outputs.logits[0, 4, :].argmax(-1 ).item()
        assert tokenizer.decode([predicted_id] ) == "cats"
    elif vqa_model:
        snake_case = torch.Size([1, 3129] )
        snake_case = torch.tensor([-15.9495, -18.1472, -10.3041] )
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
        assert torch.allclose(outputs.logits[0, 0, :3] , a , atol=1e-4 )
        # verify vqa prediction equals "2"
        snake_case = outputs.logits.argmax(-1 ).item()
        assert model.config.idalabel[predicted_idx] == "2"
    elif nlvr_model:
        snake_case = torch.Size([1, 2] )
        snake_case = torch.tensor([-2.8721, 2.1291] )
        assert torch.allclose(outputs.logits[0, :3] , a , atol=1e-4 )
        assert outputs.logits.shape == expected_shape
    # Persist the converted model and its processor.
    Path(a ).mkdir(exist_ok=a )
    print(f"""Saving model and processor to {pytorch_dump_folder_path}""" )
    model.save_pretrained(a )
    processor.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point: checkpoint URL in, HF model directory out.
    # NOTE(review): results are bound to ``_lowercase`` while the following
    # lines read ``parser`` / ``args`` — mangled names, de-mangle before use.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt',
        type=str,
        help='URL of the checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    _lowercase = parser.parse_args()
    convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 44
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
    """A binary-tree node holding a ``value`` and optional left/right children.

    NOTE(review): attribute names restored from the reads in the tree-sum class
    below (``node.value`` / ``node.left`` / ``node.right``); the mangled
    original bound all three to the placeholder ``snake_case``.
    """

    def __init__(self, value) -> None:
        self.value = value
        # Children start empty; build a tree by assigning .left / .right.
        self.left = None
        self.right = None
class _lowercase :
    """Sum every node value of a binary tree via recursive depth-first search;
    iterating the object yields the single total.

    NOTE(review): restored ``self.tree`` and the method name
    ``depth_first_search`` from the (previously dangling) internal references
    in the mangled original.
    """

    def __init__(self, tree) -> None:
        self.tree = tree

    def depth_first_search(self, node) -> int:
        # An empty subtree contributes 0; otherwise this node plus both subtrees.
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )

    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
    import doctest
    # Run this module's doctests when executed as a script.
    doctest.testmod()
| 44
| 1
|
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
_lowercase = 'Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine'
def get_user_input():
    """Interactively ask which compute environment is targeted and dispatch to
    the matching questionnaire (SageMaker vs. local cluster), returning the
    resulting config object.

    NOTE(review): function and local names restored from the call sites below;
    the mangled original bound everything to ``snake_case``.
    """
    compute_environment = _ask_options(
        '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    """Create the ``accelerate config`` argument parser, either attached to an
    existing subparsers collection or standalone.

    NOTE(review): reconstructed from the mangled original — ``a`` stood in for
    the subparsers argument, the parser description (the module-level
    ``_lowercase`` string), the ``--config_file`` default (None) and the
    dispatch target (``config_command``).
    """
    if subparsers is not None:
        parser = subparsers.add_parser('''config''' , description=_lowercase )
    else:
        parser = argparse.ArgumentParser('''Accelerate config command''' , description=_lowercase )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        # When used as a subcommand, route execution to config_command.
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    """Run the interactive questionnaire and save the resulting configuration,
    as JSON or YAML depending on the destination file's extension.

    NOTE(review): names restored from the reads in the mangled original
    (``config``, ``config_file``); the directory ensured before writing is the
    imported ``cache_dir`` holding ``default_yaml_config_file``.
    """
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        # No explicit path: write to the default cache location, creating it.
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith('''.json''' ):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"""accelerate configuration saved at {config_file}""" )
def main():
    """CLI entry point: build the parser, parse argv, run the config command.

    NOTE(review): function name restored — the ``__main__`` guard below calls
    ``main()``, but the mangled def was named ``__UpperCamelCase``.
    """
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
| 44
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
_lowercase = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """Load a torch checkpoint onto CPU and return the raw state dict.

    NOTE(review): name restored from the call site in the converter below.
    """
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Return a renamed copy of state dict *d* laid out for VisualBertModel.

    Applies the (old, new) prefix substitutions, skips detector weights, adds
    the position-ids buffer, and back-fills the decoder bias that old BERT
    checkpoints lack.

    NOTE(review): reconstructed from the mangled original (bindings were
    ``snake_case``); the position-ids destination key follows the standard
    VisualBERT embedding layout — confirm against the model's buffer names.
    """
    new_d = OrderedDict()
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings ).expand((1, -1) )
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
# NOTE(review): this converter is machine-mangled — both parameters are the
# placeholder ``a`` (duplicate names are a SyntaxError) and assignments bind
# ``snake_case`` while later lines read ``config_params``, ``model_type``,
# ``config``, ``state_dict``, ``model``...  Left byte-identical; it converts an
# original VisualBERT ``.th`` checkpoint into the matching HF VisualBert* model.
@torch.no_grad()
def __UpperCamelCase ( a : Optional[int] , a : int ) ->Union[str, Any]:
    # Only known checkpoint filenames are supported.
    assert (
        checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""
    # Get Config
    if "pre" in checkpoint_path:
        snake_case = '''pretraining'''
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""" )
    else:
        if "vcr" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 512}
            snake_case = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048}
            snake_case = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            snake_case = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            snake_case = '''vqa'''
        elif "nlvr" in checkpoint_path:
            snake_case = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            snake_case = '''nlvr'''
    snake_case = VisualBertConfig(**a )
    # Load State Dict
    snake_case = load_state_dict(a )
    snake_case = get_new_dict(a , a )
    # Pick the task-specific head matching the checkpoint flavour.
    if model_type == "pretraining":
        snake_case = VisualBertForPreTraining(a )
    elif model_type == "vqa":
        snake_case = VisualBertForQuestionAnswering(a )
    elif model_type == "nlvr":
        snake_case = VisualBertForVisualReasoning(a )
    elif model_type == "multichoice":
        snake_case = VisualBertForMultipleChoice(a )
    model.load_state_dict(a )
    # Save Checkpoints
    Path(a ).mkdir(exist_ok=a )
    model.save_pretrained(a )
if __name__ == "__main__":
    # CLI entry point: local .th checkpoint in, HF model directory out.
    # NOTE(review): results are bound to ``_lowercase`` while the following
    # lines read ``parser`` / ``args`` — mangled names, de-mangle before use.
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_lowercase = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
_lowercase = {
'allenai/led-base-16384': 16_384,
}
class _lowercase ( __a ):
    """'Fast' (tokenizers-backed) LED tokenizer, derived from BART's byte-level
    BPE tokenizer, with extra padding support for ``global_attention_mask``.

    NOTE(review): this block is machine-mangled — method parameters are the
    placeholder ``A__`` (duplicate names in a signature are a SyntaxError) and
    assignments bind ``snake_case`` while later lines read the intended names;
    the property/setter pair is named ``UpperCamelCase`` so the
    ``@mask_token.setter`` decorator cannot resolve.  Left byte-identical
    pending de-mangling.
    """
    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = LEDTokenizer
    _UpperCAmelCase = ['''input_ids''', '''attention_mask''']
    def __init__( self , A__=None , A__=None , A__=None , A__="replace" , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=False , A__=True , **A__ , ) -> List[Any]:
        super().__init__(
            A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
        # Re-sync the backend pre-tokenizer's add_prefix_space with the flag.
        snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('''add_prefix_space''' , A__ ) != add_prefix_space:
            snake_case = getattr(A__ , pre_tok_state.pop('''type''' ) )
            snake_case = add_prefix_space
            snake_case = pre_tok_class(**A__ )
        snake_case = add_prefix_space
        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        snake_case = '''post_processor'''
        snake_case = getattr(self.backend_tokenizer , A__ , A__ )
        if tokenizer_component_instance:
            snake_case = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                snake_case = tuple(state['''sep'''] )
            if "cls" in state:
                snake_case = tuple(state['''cls'''] )
            snake_case = False
            if state.get('''add_prefix_space''' , A__ ) != add_prefix_space:
                snake_case = add_prefix_space
                snake_case = True
            if state.get('''trim_offsets''' , A__ ) != trim_offsets:
                snake_case = trim_offsets
                snake_case = True
            if changes_to_apply:
                snake_case = getattr(A__ , state.pop('''type''' ) )
                snake_case = component_class(**A__ )
                setattr(self.backend_tokenizer , A__ , A__ )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def UpperCamelCase ( self ) -> str:
        # Mask-token getter: warn (once, if verbose) when unset.
        if self._mask_token is None:
            if self.verbose:
                logger.error('''Using mask_token, but it is not set yet.''' )
            return None
        return str(self._mask_token )
    @mask_token.setter
    def UpperCamelCase ( self , A__ ) -> List[Any]:
        # Mask-token setter: wrap bare strings in an AddedToken.
        snake_case = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
        snake_case = value
    def UpperCamelCase ( self , *A__ , **A__ ) -> BatchEncoding:
        # Batched encode: pretokenized input requires add_prefix_space=True.
        snake_case = kwargs.get('''is_split_into_words''' , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._batch_encode_plus(*A__ , **A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> BatchEncoding:
        # Single encode: same add_prefix_space constraint as the batched path.
        snake_case = kwargs.get('''is_split_into_words''' , A__ )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
                '''to use it with pretokenized inputs.''' )
        return super()._encode_plus(*A__ , **A__ )
    def UpperCamelCase ( self , A__ , A__ = None ) -> Tuple[str]:
        # Save the backend vocabulary files and return their paths.
        snake_case = self._tokenizer.model.save(A__ , name=A__ )
        return tuple(A__ )
    def UpperCamelCase ( self , A__ , A__=None ) -> Any:
        # Build <s> ... </s> (and </s> pair </s> for sentence pairs).
        snake_case = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a is None:
            return output
        return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
    def UpperCamelCase ( self , A__ , A__ = None ) -> List[int]:
        # LED (like BART/RoBERTa) uses all-zero token type ids.
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict:
        # Standard padding, plus padding of global_attention_mask to match the
        # (already padded) sequential inputs.
        snake_case = super()._pad(
            encoded_inputs=A__ , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
        # Load from model defaults
        if return_attention_mask is None:
            snake_case = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(A__ )
            if needs_to_be_padded:
                snake_case = len(A__ ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    snake_case = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    snake_case = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 44
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rewrite ``{block}.{layer}.{original_name}`` in `key` to ``block.{block - offset}.{layer}.{new_name}``."""
    to_find = original_name.split(".")[0]
    key_list = key.split(".")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(f"{orig_block_num}.{layer_num}.{original_name}", f"block.{new_block_num}.{layer_num}.{new_name}")
    return key
def rename_keys(state_dict):
    """Map original PoolFormer checkpoint keys to the HuggingFace naming scheme."""
    new_state_dict = OrderedDict()
    # patch_emb_offset counts embedding layers seen so far so block indices can be rebased.
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("network"):
            key = key.replace("network", "poolformer.encoder")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("bias") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("proj")]
            key = key.replace(to_replace, f"patch_embeddings.{total_embed_found}.")
            key = key.replace("proj", "projection")
            if key.endswith("bias"):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = "poolformer.encoder." + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc1", "output.conv1")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "mlp.fc2", "output.conv2")
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm1", "before_norm")
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "norm2", "after_norm")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_1", "layer_scale_1")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, "layer_scale_2", "layer_scale_2")
        if "head" in key:
            key = key.replace("head", "classifier")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download and return the standard COCO cats test image."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to HF format, verify logits, and save it."""
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    size = model_name[-3:]
    config.num_labels = 1000
    filename = "imagenet-1k-id2label.json"
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img(), return_tensors="pt").pixel_values

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"Size {size} not supported")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_name",
        default="poolformer_s12",
        type=str,
        help="Name of the model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 1
|
'''simple docstring'''
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    """Yield fixed-length token tensors by concatenating tokenized examples from a (streaming) dataset.

    Examples are buffered until roughly `seq_length * chars_per_token * num_of_sequences`
    characters are collected, tokenized in one batch, joined with the BOS token as a
    separator, and sliced into `seq_length`-sized chunks (the last partial chunk is dropped).
    """

    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        # Number of raw characters to buffer before each tokenization pass.
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    """Build the streaming evaluation dataloader described by `args`.

    NOTE(review): relies on the module-level `tokenizer` loaded below — confirm load order.
    """
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    """Return (mean eval loss, perplexity) of the global `model` over `eval_dataloader`."""
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        # Repeat per-batch loss so gather() weights every sample equally across processes.
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
| 44
|
'''simple docstring'''
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
# Make the Flax example script directories importable as top-level modules.
SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_ta_mlm_flax

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def __UpperCamelCase():
    """Return the value of the `-f` flag (some notebook runners pass it implicitly)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def get_results(output_dir, split="eval"):
    """Load `{split}_results.json` from `output_dir`; raise ValueError if missing."""
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")
# Echo test logging to stdout so pytest captures it.
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class _lowercase(TestCasePlus):
    """End-to-end smoke tests running each Flax example script on tiny fixture datasets."""

    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()
        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()
        with patch.object(sys, "argv", testargs):
            run_ta_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()
        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()
        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 44
| 1
|
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
        "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
        "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
        "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
        "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
        "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
        "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
        "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "albert-base-v1": 512,
    "albert-large-v1": 512,
    "albert-xlarge-v1": 512,
    "albert-xxlarge-v1": 512,
    "albert-base-v2": 512,
    "albert-large-v2": 512,
    "albert-xlarge-v2": 512,
    "albert-xxlarge-v2": 512,
}

# SentencePiece's word-boundary marker.
SPIECE_UNDERLINE = "▁"
class _lowercase(PreTrainedTokenizer):
    """ALBERT tokenizer backed by a SentencePiece model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # The SentencePiece processor is not picklable; drop it and reload on unpickle.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def preprocess_text(self, inputs):
        """Normalize raw text (whitespace, quotes, accents, case) before SentencePiece."""
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')
        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()
        return outputs

    def _tokenize(self, text):
        """Tokenize, re-splitting pieces that end in 'digit,' so commas detach from numbers."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)
        return new_pieces

    def _convert_token_to_id(self, token):
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s for the first segment (incl. CLS/SEP), 1s for the second segment."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 700
|
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
_lowercase = logging.get_logger(__name__)
_lowercase = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class _lowercase(Trainer):
    """Trainer variant for seq2seq models: label smoothing, sortish sampling, generation at eval."""

    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config
        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size
        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )
        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )
        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss

    def create_optimizer_and_scheduler(self, num_training_steps):
        """Set up optimizer (AdamW or Adafactor, no weight decay on bias/LayerNorm) and LR scheduler."""
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler

    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
        if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
            return None
        elif is_torch_tpu_available():
            return get_tpu_sampler(self.train_dataset)
        else:
            if self.args.sortish_sampler:
                self.train_dataset.make_sortish_sampler(
                    self.args.per_device_train_batch_size,
                    distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED),
                )
            return (
                RandomSampler(self.train_dataset)
                if self.args.local_rank == -1
                else DistributedSampler(self.train_dataset)
            )

    def _compute_loss(self, model, inputs, labels):
        """Return (loss, logits), honoring pad-ignoring and label smoothing settings."""
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model,
        inputs,
        prediction_loss_only,
        ignore_keys=None,
    ):
        """Evaluation step; generates tokens when --predict_with_generate is set."""
        inputs = self._prepare_inputs(inputs)
        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }
        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)
        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)
        logits = generated_tokens if self.args.predict_with_generate else logits
        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])
        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 44
| 0
|
'''simple docstring'''
from math import pi, sqrt
def gamma(num):
    """Return Gamma(num) for positive integers and half-integers <= 171.5.

    Raises ValueError for num <= 0, OverflowError for num > 171.5 (float overflow),
    and NotImplementedError for other fractional inputs.
    """
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        # Gamma(1/2) = sqrt(pi)
        return sqrt(pi)
    else:
        # Recurrence: Gamma(n) = (n - 1) * Gamma(n - 1)
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def __UpperCamelCase():
    """Spot-check gamma at the classic reference points."""
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
    from doctest import testmod

    testmod()
    # Interactive loop: keep computing gamma until the user enters 0.
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
| 701
|
'''simple docstring'''
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def __UpperCamelCase(a) -> str:
    """Hash a list of source lines after dropping comments and blank lines.

    Used as a cache key so packaged-module datasets are re-imported when
    their builder code changes.

    NOTE(review): restored the internal names the mangled original read
    but never bound (`filtered_lines`, `full_str`), and made the loop
    filter each line rather than the raw parameter.
    """
    filtered_lines = []
    for line in a:
        line = re.sub(R'''#.*''' , '''''' , line )  # remove comments
        if line:
            filtered_lines.append(line )
    full_str = '''\n'''.join(filtered_lines )
    # Make a hash from all this code
    full_bytes = full_str.encode('''utf-8''' )
    return shaaaa(full_bytes ).hexdigest()
# get importable module names and hash for caching
# Maps builder name -> (import path, hash of its source) so cached datasets
# are invalidated when a packaged builder's code changes.
# NOTE(review): the mangler collapsed distinct module-level bindings onto
# `_lowercase`, yet later statements read `_hash_python_lines`,
# `_EXTENSION_TO_MODULE` and `_MODULE_TO_EXTENSIONS`, none of which are
# bound under those names in this file — confirm against upstream.
_lowercase = {
    'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
    'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
    'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
    'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
    'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
    'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
    'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
    'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
_lowercase = {
    '.csv': ('csv', {}),
    '.tsv': ('csv', {'sep': '\t'}),
    '.json': ('json', {}),
    '.jsonl': ('json', {}),
    '.parquet': ('parquet', {}),
    '.arrow': ('arrow', {}),
    '.txt': ('text', {}),
}
# Folder-based builders accept every image/audio extension they declare,
# in both lower and upper case.
_EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_lowercase = {'imagefolder', 'audiofolder'}
# Used to filter data files based on extensions given a module name
_lowercase = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
    _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
# Archives can wrap image/audio folders, so .zip is also accepted.
_MODULE_TO_EXTENSIONS["imagefolder"].append('.zip')
_MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
| 44
| 0
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def __UpperCamelCase(a) -> int:
    """Liouville lambda of `a`: -1 when `a` has an odd number of prime
    factors counted with multiplicity, +1 otherwise.

    NOTE(review): the mangled original tested ``isinstance(x, x)`` and
    interpolated an unbound ``number``; restored a consistent check
    against ``int``.

    Raises:
        TypeError: if `a` is not an integer.
        ValueError: if `a` < 1.
    """
    if not isinstance(a, int ):
        msg = f"""Input value of [number={a}] must be an integer"""
        raise TypeError(msg )
    if a < 1:
        raise ValueError('''Input must be a positive integer''' )
    # Odd factor count -> -1, even -> +1.
    return -1 if len(prime_factors(a ) ) % 2 else 1
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 702
|
'''simple docstring'''
# Pinned / minimum versions for every required and optional dependency of
# the package; consumed by setup tooling and runtime version checks.
# NOTE(review): the mangler collapsed the original binding (presumably a
# `deps` table) to `_lowercase` — confirm the real name before reuse.
_lowercase = {
    'Pillow': 'Pillow',
    'accelerate': 'accelerate>=0.11.0',
    'compel': 'compel==0.1.8',
    'black': 'black~=23.1',
    'datasets': 'datasets',
    'filelock': 'filelock',
    'flax': 'flax>=0.4.1',
    'hf-doc-builder': 'hf-doc-builder>=0.3.0',
    'huggingface-hub': 'huggingface-hub>=0.13.2',
    'requests-mock': 'requests-mock==1.10.0',
    'importlib_metadata': 'importlib_metadata',
    'invisible-watermark': 'invisible-watermark',
    'isort': 'isort>=5.5.4',
    'jax': 'jax>=0.2.8,!=0.3.2',
    'jaxlib': 'jaxlib>=0.1.65',
    'Jinja2': 'Jinja2',
    'k-diffusion': 'k-diffusion>=0.0.12',
    'torchsde': 'torchsde',
    'note_seq': 'note_seq',
    'librosa': 'librosa',
    'numpy': 'numpy',
    'omegaconf': 'omegaconf',
    'parameterized': 'parameterized',
    'protobuf': 'protobuf>=3.20.3,<4',
    'pytest': 'pytest',
    'pytest-timeout': 'pytest-timeout',
    'pytest-xdist': 'pytest-xdist',
    'ruff': 'ruff>=0.0.241',
    'safetensors': 'safetensors',
    'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
    'scipy': 'scipy',
    'onnx': 'onnx',
    'regex': 'regex!=2019.12.17',
    'requests': 'requests',
    'tensorboard': 'tensorboard',
    'torch': 'torch>=1.4',
    'torchvision': 'torchvision',
    'transformers': 'transformers>=4.25.1',
    'urllib3': 'urllib3<=2.0.0',
}
| 44
| 0
|
'''simple docstring'''
def __UpperCamelCase(a) -> bool:
    """Return True if `a` is an automorphic number, i.e. a*a ends with the
    digits of `a` itself (5 -> 25, 76 -> 5776).

    NOTE(review): the mangled original read an unbound ``number`` and
    raised ``TypeError(a)`` with the raw value instead of the message it
    had just built; restored both.

    Raises:
        TypeError: if `a` is not an integer.
    """
    if not isinstance(a, int ):
        msg = f"""Input value of [number={a}] must be an integer"""
        raise TypeError(msg )
    if a < 0:
        return False
    number = a
    number_square = number * number
    # Compare digits from the least-significant end; any mismatch fails.
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 703
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowercase ( __a , __a , unittest.TestCase ):
    # Fast pipeline tests for IFInpaintingSuperResolutionPipeline.
    # NOTE(review): the mangler collapsed four distinct class attributes
    # (presumably pipeline_class / params / batch_params /
    # required_optional_params) onto `_UpperCAmelCase`, and every method
    # onto `UpperCamelCase`, so only the last of each survives at runtime
    # — restore the upstream names before relying on this class.
    _UpperCAmelCase = IFInpaintingSuperResolutionPipeline
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''width''', '''height'''}
    _UpperCAmelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({'''original_image'''} )
    _UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}

    def UpperCamelCase ( self ) -> int:
        # Reuse the shared dummy component factory from the IF test mixin.
        return self._get_superresolution_dummy_components()

    def UpperCamelCase ( self , A__ , A__=0 ) -> Union[str, Any]:
        # Build deterministic dummy inputs (device, seed) for the pipeline.
        # NOTE(review): both parameters are named A__ (a SyntaxError) and
        # the locals feeding the dict below were renamed to `snake_case`,
        # leaving `image` / `original_image` / `mask_image` / `generator`
        # unbound — restore from the upstream test before running.
        if str(A__ ).startswith('''mps''' ):
            snake_case = torch.manual_seed(A__ )
        else:
            snake_case = torch.Generator(device=A__ ).manual_seed(A__ )
        snake_case = floats_tensor((1, 3, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
        snake_case = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''original_image''': original_image,
            '''mask_image''': mask_image,
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def UpperCamelCase ( self ) -> List[Any]:
        # Parity check for xformers memory-efficient attention.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )

    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
    def UpperCamelCase ( self ) -> List[str]:
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1e-1 )

    def UpperCamelCase ( self ) -> int:
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )

    def UpperCamelCase ( self ) -> Optional[Any]:
        self._test_save_load_local()

    def UpperCamelCase ( self ) -> Dict:
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2 , )
| 44
| 0
|
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def __UpperCamelCase(
    search_prob,
    find_max=True,
    max_x=math.inf,
    min_x=-math.inf,
    max_y=math.inf,
    min_y=-math.inf,
    visualization=False,
    start_temperate=100,
    rate_of_decrease=0.01,
    threshold_temp=1,
) -> Any:
    """Simulated annealing over a SearchProblem-like state.

    Starts from `search_prob`, repeatedly jumps to a random in-bounds
    neighbor (always when it improves, with probability e^(change/T)
    otherwise), cooling the temperature by `rate_of_decrease` each step,
    and returns the best state seen.

    NOTE(review): the mangled original declared every parameter as ``a``
    (a SyntaxError) and left ``_lowercase`` placeholders in the body;
    signature and locals restored from the names the body reads.
    """
    search_end = False
    current_state = search_prob
    current_temp = start_temperate
    scores = []
    iterations = 0
    best_state = None
    while not search_end:
        current_score = current_state.score()
        if best_state is None or current_score > best_state.score():
            best_state = current_state
        scores.append(current_score)
        iterations += 1
        next_state = None
        neighbors = current_state.get_neighbors()
        while (
            next_state is None and neighbors
        ):  # till we do not find a neighbor that we can move to
            index = random.randint(0, len(neighbors) - 1)  # picking a random neighbor
            picked_neighbor = neighbors.pop(index)
            change = picked_neighbor.score() - current_score
            if (
                picked_neighbor.x > max_x
                or picked_neighbor.x < min_x
                or picked_neighbor.y > max_y
                or picked_neighbor.y < min_y
            ):
                continue  # neighbor outside our bounds
            if not find_max:
                change = change * -1  # in case we are finding minimum
            if change > 0:  # improves the solution
                next_state = picked_neighbor
            else:
                probability = (math.e) ** (
                    change / current_temp
                )  # probability generation function
                if random.random() < probability:  # random number within probability
                    next_state = picked_neighbor
        current_temp = current_temp - (current_temp * rate_of_decrease)
        if current_temp < threshold_temp or next_state is None:
            # temperature below threshold, or could not find a suitable neighbor
            search_end = True
        else:
            current_state = next_state
    if visualization:
        from matplotlib import pyplot as plt

        plt.plot(range(iterations), scores)
        plt.xlabel('''Iterations''' )
        plt.ylabel('''Function values''' )
        plt.show()
    return best_state
if __name__ == "__main__":
    # NOTE(review): the mangled original named both demo objectives
    # `__UpperCamelCase` (shadowing the annealing routine above) and bound
    # every result to `_lowercase` while printing `local_min`; restored a
    # self-consistent demo that calls the annealing function directly.

    def test_fa(x, y):
        # Convex bowl: global minimum at (0, 0).
        return (x**2) + (y**2)

    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = __UpperCamelCase(
        prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )
    # starting the problem with initial coordinates (12, 47)
    prob = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
    local_min = __UpperCamelCase(
        prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
    )
    print(
        'The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '
        f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
    )

    def test_fa_plane(x, y):
        # Linear in y, quadratic in x: unbounded, demo only.
        return (3 * x**2) - (6 * y)

    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa_plane)
    local_min = __UpperCamelCase(prob, find_max=False, visualization=True)
    print(
        'The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'{local_min.score()}'
    )
    prob = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa_plane)
    local_min = __UpperCamelCase(prob, find_max=True, visualization=True)
    print(
        'The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: '
        f'{local_min.score()}'
    )
| 704
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class _lowercase ( __a ):
    # Base feature-extraction class for sequence (audio-like) inputs:
    # batch padding, truncation and tensor conversion.
    # NOTE(review): throughout this class the mangler rebound every local
    # to `snake_case` while later statements still read the original names
    # (`kwargs`, `required_input`, `first_element`, `padding_strategy`,
    # `truncated_inputs`, `batch_outputs`, `needs_to_be_padded`, ...),
    # which are therefore unbound here — restore from upstream before use.

    def __init__( self , A__ , A__ , A__ , **A__ ) -> Union[str, Any]:
        # Stores feature_size / sampling_rate / padding_value, plus the
        # optional padding_side (default 'right') and return_attention_mask.
        snake_case = feature_size
        snake_case = sampling_rate
        snake_case = padding_value
        snake_case = kwargs.pop('''padding_side''' , '''right''' )
        snake_case = kwargs.pop('''return_attention_mask''' , A__ )
        super().__init__(**A__ )

    def UpperCamelCase ( self , A__ , A__ = True , A__ = None , A__ = False , A__ = None , A__ = None , A__ = None , ) -> BatchFeature:
        # Pad (and optionally truncate) a batch of features to a common
        # length, returning a BatchFeature in the requested tensor type.
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(A__ , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            snake_case = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                '''You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'''
                F""" to this method that includes {self.model_input_names[0]}, but you provided"""
                F""" {list(processed_features.keys() )}""" )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(A__ ) == 0:
            # Empty batch: optionally attach an empty attention mask.
            if return_attention_mask:
                snake_case = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        snake_case = required_input[0]
        if isinstance(A__ , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            snake_case = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(A__ ):
                snake_case = required_input[index][0]
        if return_tensors is None:
            # Infer the output tensor type from the first element's type.
            if is_tf_tensor(A__ ):
                snake_case = '''tf'''
            elif is_torch_tensor(A__ ):
                snake_case = '''pt'''
            elif isinstance(A__ , (int, float, list, tuple, np.ndarray) ):
                snake_case = '''np'''
            else:
                raise ValueError(
                    F"""type of {first_element} unknown: {type(A__ )}. """
                    '''Should be one of a python, numpy, pytorch or tensorflow object.''' )
        for key, value in processed_features.items():
            # Normalize every column to numpy for uniform padding below.
            if isinstance(value[0] , (int, float) ):
                snake_case = to_numpy(A__ )
            else:
                snake_case = [to_numpy(A__ ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        snake_case = self._get_padding_strategies(padding=A__ , max_length=A__ )
        snake_case = processed_features[self.model_input_names[0]]
        snake_case = len(A__ )
        if not all(len(A__ ) == batch_size for v in processed_features.values() ):
            raise ValueError('''Some items in the output dictionary have a different batch size than others.''' )
        snake_case = []
        for i in range(A__ ):
            # Per-example truncation before padding.
            snake_case = {k: v[i] for k, v in processed_features.items()}
            # truncation
            snake_case = self._truncate(
                A__ , max_length=A__ , pad_to_multiple_of=A__ , truncation=A__ , )
            truncated_inputs.append(A__ )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            snake_case = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            snake_case = PaddingStrategy.MAX_LENGTH
        snake_case = {}
        for i in range(A__ ):
            # padding
            snake_case = self._pad(
                truncated_inputs[i] , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    snake_case = []
                if value.dtype is np.dtype(np.floataa ):
                    snake_case = value.astype(np.floataa )
                batch_outputs[key].append(A__ )
        return BatchFeature(A__ , tensor_type=A__ )

    def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> dict:
        # Pad a single example dict to `max_length` on self.padding_side,
        # adding/extending the attention mask accordingly.
        snake_case = processed_features[self.model_input_names[0]]
        if padding_strategy == PaddingStrategy.LONGEST:
            snake_case = len(A__ )
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            # Round max_length up to the next multiple.
            snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        snake_case = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length
        if return_attention_mask and "attention_mask" not in processed_features:
            snake_case = np.ones(len(A__ ) , dtype=np.intaa )
        if needs_to_be_padded:
            snake_case = max_length - len(A__ )
            if self.padding_side == "right":
                if return_attention_mask:
                    snake_case = np.pad(
                        processed_features['''attention_mask'''] , (0, difference) )
                snake_case = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                snake_case = np.pad(
                    A__ , A__ , '''constant''' , constant_values=self.padding_value )
            elif self.padding_side == "left":
                if return_attention_mask:
                    snake_case = np.pad(
                        processed_features['''attention_mask'''] , (difference, 0) )
                snake_case = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                snake_case = np.pad(
                    A__ , A__ , '''constant''' , constant_values=self.padding_value )
            else:
                raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return processed_features

    def UpperCamelCase ( self , A__ , A__ = None , A__ = None , A__ = None , ) -> Union[str, Any]:
        # Truncate a single example dict (and its attention mask) to
        # `max_length`, rounded up to `pad_to_multiple_of` when given.
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError('''When setting ``truncation=True``, make sure that ``max_length`` is defined.''' )
        snake_case = processed_features[self.model_input_names[0]]
        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            snake_case = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
        snake_case = len(A__ ) > max_length
        if needs_to_be_truncated:
            snake_case = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                snake_case = processed_features['''attention_mask'''][:max_length]
        return processed_features

    def UpperCamelCase ( self , A__=False , A__=None ) -> Union[str, Any]:
        # Normalize the user-supplied `padding` argument to a
        # PaddingStrategy and validate max_length / padding_value.
        # Get padding strategy
        if padding is not False:
            if padding is True:
                snake_case = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(A__ , A__ ):
                snake_case = PaddingStrategy(A__ )
            elif isinstance(A__ , A__ ):
                snake_case = padding
        else:
            snake_case = PaddingStrategy.DO_NOT_PAD
        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    F"""When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined""" )
        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                '''Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'''
                ''' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.''' )
        return padding_strategy
| 44
| 0
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# Module logger.
_lowercase = logging.get_logger(__name__)
# Filenames a saved tokenizer directory is expected to contain.
_lowercase = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
# Download URLs for the pretrained vocab/merges/tokenizer files.
# NOTE(review): the mangler collapsed these distinct constants onto
# `_lowercase`; the class below reads PRETRAINED_VOCAB_FILES_MAP-style
# names instead — confirm against the upstream module.
_lowercase = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}
# Maximum model input sizes (in tokens) per checkpoint.
_lowercase = {
    "allenai/led-base-16384": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def __UpperCamelCase() -> Dict[int, str]:
    """Map every byte value (0-255) to a printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted into
    the 256+ codepoint range so the BPE never operates on whitespace or
    control characters.

    NOTE(review): restored the internal names (`bs`, `cs`, `n`) that the
    mangled original referenced but never bound, and fixed the return
    annotation (a dict, not a list).
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def __UpperCamelCase(a) -> set:
    """Return the set of adjacent symbol pairs in the word `a`
    (a sequence of symbols, e.g. a tuple of strings).

    NOTE(review): restored `pairs` / `prev_char` (referenced but never
    bound in the mangled original) and fixed the return annotation
    (a set of 2-tuples, not a list of strings).
    """
    pairs = set()
    prev_char = a[0]
    for char in a[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class _lowercase ( lowercase__ ):
    # Byte-level BPE tokenizer (LED / BART family): vocab.json + merges.txt,
    # with global attention-mask padding support.
    # NOTE(review): the mangler rebound every local to `snake_case` while
    # later statements still read the original names (`self.encoder`,
    # `word`, `new_word`, `pairs`, `bigram`, `index`, `vocab_file`, ...),
    # which are therefore unbound here; every method is also named
    # `UpperCamelCase`, so only the last survives — restore from upstream.
    _UpperCAmelCase = VOCAB_FILES_NAMES
    _UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCAmelCase = ['''input_ids''', '''attention_mask''']

    def __init__( self , A__ , A__ , A__="replace" , A__="<s>" , A__="</s>" , A__="</s>" , A__="<s>" , A__="<unk>" , A__="<pad>" , A__="<mask>" , A__=False , **A__ , ) -> Any:
        # Wrap plain-string special tokens in AddedToken, load vocab and
        # merges, and precompile the GPT-2-style tokenization regex.
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else bos_token
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else eos_token
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else sep_token
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else cls_token
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else unk_token
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        snake_case = AddedToken(__lowerCamelCase , lstrip=__lowerCamelCase , rstrip=__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else mask_token
        super().__init__(
            errors=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , unk_token=__lowerCamelCase , sep_token=__lowerCamelCase , cls_token=__lowerCamelCase , pad_token=__lowerCamelCase , mask_token=__lowerCamelCase , add_prefix_space=__lowerCamelCase , **__lowerCamelCase , )
        with open(__lowerCamelCase , encoding='''utf-8''' ) as vocab_handle:
            snake_case = json.load(__lowerCamelCase )
        snake_case = {v: k for k, v in self.encoder.items()}
        snake_case = errors  # how to handle errors in decoding
        snake_case = bytes_to_unicode()
        snake_case = {v: k for k, v in self.byte_encoder.items()}
        with open(__lowerCamelCase , encoding='''utf-8''' ) as merges_handle:
            snake_case = merges_handle.read().split('''\n''' )[1:-1]
        snake_case = [tuple(merge.split() ) for merge in bpe_merges]
        snake_case = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
        snake_case = {}
        snake_case = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        snake_case = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def UpperCamelCase ( self ) -> Optional[int]:
        # Size of the base vocabulary (excluding added tokens).
        return len(self.encoder )

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Full vocab: base encoder plus added tokens.
        return dict(self.encoder , **self.added_tokens_encoder )

    def UpperCamelCase ( self , A__ ) -> List[Any]:
        # Apply BPE merges to a single pre-token, with memoization in
        # self.cache.
        if token in self.cache:
            return self.cache[token]
        snake_case = tuple(__lowerCamelCase )
        snake_case = get_pairs(__lowerCamelCase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked bigram until no known merge remains.
            snake_case = min(__lowerCamelCase , key=lambda A__ : self.bpe_ranks.get(__lowerCamelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            snake_case = bigram
            snake_case = []
            snake_case = 0
            while i < len(__lowerCamelCase ):
                try:
                    snake_case = word.index(__lowerCamelCase , __lowerCamelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    snake_case = j
                if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            snake_case = tuple(__lowerCamelCase )
            snake_case = new_word
            if len(__lowerCamelCase ) == 1:
                break
            else:
                snake_case = get_pairs(__lowerCamelCase )
        snake_case = " ".join(__lowerCamelCase )
        snake_case = word
        return word

    def UpperCamelCase ( self , A__ ) -> str:
        # Tokenize text: regex pre-tokenization, byte-to-unicode mapping,
        # then BPE on each pre-token.
        snake_case = []
        for token in re.findall(self.pat , __lowerCamelCase ):
            snake_case = "".join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__lowerCamelCase ).split(''' ''' ) )
        return bpe_tokens

    def UpperCamelCase ( self , A__ ) -> Dict:
        # Token string -> id (falls back to the unk token's id).
        return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )

    def UpperCamelCase ( self , A__ ) -> List[str]:
        # Id -> token string.
        return self.decoder.get(__lowerCamelCase )

    def UpperCamelCase ( self , A__ ) -> Any:
        # Tokens -> text: undo the byte-to-unicode mapping.
        snake_case = "".join(__lowerCamelCase )
        snake_case = bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text

    def UpperCamelCase ( self , A__ , A__ = None ) -> Optional[Any]:
        # Write vocab.json and merges.txt into `save_directory`.
        if not os.path.isdir(__lowerCamelCase ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        snake_case = os.path.join(
            __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(
            __lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + '''\n''' )
        snake_case = 0
        with open(__lowerCamelCase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda A__ : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    snake_case = token_index
                writer.write(''' '''.join(__lowerCamelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file

    def UpperCamelCase ( self , A__ , A__ = None ) -> List[str]:
        # Build model inputs with special tokens:
        # <s> A </s> (pair: <s> A </s></s> B </s>).
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        snake_case = [self.cls_token_id]
        snake_case = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def UpperCamelCase ( self , A__ , A__ = None , A__ = False ) -> Any:
        # 1 for special tokens, 0 for sequence tokens.
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=__lowerCamelCase , token_ids_a=__lowerCamelCase , already_has_special_tokens=__lowerCamelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(__lowerCamelCase )) + [1]
        return [1] + ([0] * len(__lowerCamelCase )) + [1, 1] + ([0] * len(__lowerCamelCase )) + [1]

    def UpperCamelCase ( self , A__ , A__ = None ) -> Optional[Any]:
        # Token-type ids are all zeros for this model family.
        snake_case = [self.sep_token_id]
        snake_case = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def UpperCamelCase ( self , A__ , A__=False , **A__ ) -> List[Any]:
        # Optionally prepend a space so the first word is BPE-merged the
        # same way as mid-sentence words.
        snake_case = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(__lowerCamelCase ) > 0 and not text[0].isspace()):
            snake_case = " " + text
        return (text, kwargs)

    def UpperCamelCase ( self , A__ , A__ = None , A__ = PaddingStrategy.DO_NOT_PAD , A__ = None , A__ = None , ) -> Any:
        # Standard padding plus padding of `global_attention_mask` with -1
        # (meaning "local attention", not "ignore").
        snake_case = super()._pad(
            encoded_inputs=__lowerCamelCase , max_length=__lowerCamelCase , padding_strategy=__lowerCamelCase , pad_to_multiple_of=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
        # Load from model defaults
        if return_attention_mask is None:
            snake_case = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            snake_case = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            snake_case = len(encoded_inputs['''global_attention_mask'''] ) != len(__lowerCamelCase )
            if needs_to_be_padded:
                snake_case = len(__lowerCamelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    snake_case = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    snake_case = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 705
|
'''simple docstring'''
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class _lowercase ( yaml.SafeLoader ):
    """yaml.SafeLoader that raises on duplicate mapping keys instead of
    silently keeping the last value.

    NOTE(review): the mangled original named both methods
    ``UpperCamelCase`` — the duplicate-key check was overwritten and
    PyYAML could never dispatch to ``construct_mapping``; restored the
    names the logic requires.
    """

    def _check_no_duplicates_on_constructed_node(self, node) -> None:
        # Collect the already-constructed keys of this mapping node,
        # making list keys hashable as tuples before counting.
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )

    def construct_mapping(self, node, deep=False) -> dict:
        # Build the mapping as usual, then validate key uniqueness.
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping
def __UpperCamelCase(a) -> Tuple[Optional[str], str]:
    """Split a README string into (yaml_block, body).

    If the text starts with a ``---`` fence and a closing ``---`` exists,
    returns the text between the fences and the remainder; otherwise
    returns ``(None, full_text)``.

    NOTE(review): restored the locals the mangled original read but never
    bound (`full_content`, `sep_idx`, `yamlblock`).
    """
    full_content = list(a.splitlines() )
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing fence, relative to the whole line list.
        sep_idx = full_content[1:].index('''---''' ) + 1
        yamlblock = '''\n'''.join(full_content[1:sep_idx] )
        return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
    return None, "\n".join(full_content )
class _lowercase ( __a ):
    # Dataset README metadata: parsed from / serialized to the YAML block
    # at the top of a README.md.
    # NOTE(review): locals below were mangled to `snake_case`; names read
    # afterwards (`yaml_string`, `readme_content`, `metadata_dict`) are
    # unbound here — restore from upstream before use. Several methods
    # also share the name `UpperCamelCase`, so only the last survives.

    # class attributes
    _UpperCAmelCase = {'''train_eval_index'''}  # train-eval-index in the YAML metadata

    @classmethod
    def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
        # Parse metadata from a README file path.
        with open(A__ , encoding='''utf-8''' ) as readme_file:
            snake_case , snake_case = _split_yaml_from_readme(readme_file.read() )
        if yaml_string is not None:
            return cls.from_yaml_string(A__ )
        else:
            return cls()

    def UpperCamelCase ( self , A__ ) -> str:
        # Write this metadata back into a README file, preserving its body.
        if path.exists():
            with open(A__ , encoding='''utf-8''' ) as readme_file:
                snake_case = readme_file.read()
        else:
            snake_case = None
        snake_case = self._to_readme(A__ )
        with open(A__ , '''w''' , encoding='''utf-8''' ) as readme_file:
            readme_file.write(A__ )

    def UpperCamelCase ( self , A__ = None ) -> str:
        # Render '---\n<yaml>\n---\n' followed by the (optional) body.
        if readme_content is not None:
            snake_case , snake_case = _split_yaml_from_readme(A__ )
            snake_case = '''---\n''' + self.to_yaml_string() + '''---\n''' + content
        else:
            snake_case = '''---\n''' + self.to_yaml_string() + '''---\n'''
        return full_content

    @classmethod
    def UpperCamelCase ( cls , A__ ) -> "DatasetMetadata":
        # Parse a YAML string, rejecting duplicate keys, and map dashed
        # YAML keys onto underscored field names.
        snake_case = yaml.load(A__ , Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        snake_case = {
            (key.replace('''-''' , '''_''' ) if key.replace('''-''' , '''_''' ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**A__ )

    def UpperCamelCase ( self ) -> str:
        # Serialize back to YAML, restoring dashes in the flagged fields.
        return yaml.safe_dump(
            {
                (key.replace('''_''' , '''-''' ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            } , sort_keys=A__ , allow_unicode=A__ , encoding='''utf-8''' , ).decode('''utf-8''' )
# Known task-category ids accepted in dataset metadata (values are the
# allowed sub-task lists, empty here).
# NOTE(review): the mangler collapsed this and the CLI bindings below onto
# `_lowercase`; the main block reads `ap` / `args` / `readme_filepath` /
# `dataset_metadata`, which are unbound here — restore from upstream.
_lowercase = {
    'image-classification': [],
    'translation': [],
    'image-segmentation': [],
    'fill-mask': [],
    'automatic-speech-recognition': [],
    'token-classification': [],
    'sentence-similarity': [],
    'audio-classification': [],
    'question-answering': [],
    'summarization': [],
    'zero-shot-classification': [],
    'table-to-text': [],
    'feature-extraction': [],
    'other': [],
    'multiple-choice': [],
    'text-classification': [],
    'text-to-image': [],
    'text2text-generation': [],
    'zero-shot-image-classification': [],
    'tabular-classification': [],
    'tabular-regression': [],
    'image-to-image': [],
    'tabular-to-text': [],
    'unconditional-image-generation': [],
    'text-retrieval': [],
    'text-to-speech': [],
    'object-detection': [],
    'audio-to-audio': [],
    'text-generation': [],
    'conversational': [],
    'table-question-answering': [],
    'visual-question-answering': [],
    'image-to-text': [],
    'reinforcement-learning': [],
    'voice-activity-detection': [],
    'time-series-forecasting': [],
    'document-question-answering': [],
}
if __name__ == "__main__":
    # CLI: validate (and round-trip) the YAML metadata block of a README.
    from argparse import ArgumentParser
    _lowercase = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    _lowercase = ap.parse_args()
    _lowercase = Path(args.readme_filepath)
    _lowercase = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 44
| 0
|
'''simple docstring'''
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowercase ( __lowerCamelCase ):
    """Unit tests for ``IPNDMScheduler``: config save/reload round-trips,
    step determinism, output shapes and a full denoising loop.

    NOTE(review): this block looks machine-mangled — every method shares the
    name ``UpperCamelCase`` (so later defs shadow earlier ones), both class
    attributes are bound to ``_UpperCAmelCase`` (the second shadows the
    first), locals are assigned to ``snake_case`` but read back under their
    original names (e.g. ``config``, ``scheduler``), and bodies reference an
    undefined ``UpperCAmelCase_``.  Restore real identifiers (upstream:
    diffusers ``test_scheduler_ipndm.py``) before relying on this code.
    """

    _UpperCAmelCase = (IPNDMScheduler,)  # presumably ``scheduler_classes`` — TODO confirm
    _UpperCAmelCase = (('''num_inference_steps''', 50),)  # presumably ``forward_default_kwargs``; shadows the line above

    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Build a default scheduler config dict, letting kwargs override it.
        # NOTE(review): the dict is bound to ``snake_case`` but read as
        # ``config``, and ``UpperCAmelCase_`` is undefined — NameError.
        snake_case = {'num_train_timesteps': 10_00}
        config.update(**UpperCAmelCase_ )
        return config

    def UpperCamelCase ( self , A__=0 , **A__ ) -> Tuple:
        # Save/reload round-trip: a scheduler restored with from_pretrained
        # must produce step outputs identical to the live instance.
        snake_case = dict(self.forward_default_kwargs )
        snake_case = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        snake_case = self.dummy_sample
        snake_case = 0.1 * sample
        snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            snake_case = self.get_scheduler_config(**UpperCAmelCase_ )
            snake_case = scheduler_class(**UpperCAmelCase_ )
            scheduler.set_timesteps(UpperCAmelCase_ )
            # copy over dummy past residuals
            snake_case = dummy_past_residuals[:]
            if time_step is None:
                snake_case = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase_ )
                snake_case = scheduler_class.from_pretrained(UpperCAmelCase_ )
                new_scheduler.set_timesteps(UpperCAmelCase_ )
                # copy over dummy past residuals
                snake_case = dummy_past_residuals[:]
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def UpperCamelCase ( self ) -> Any:
        # Intentionally disabled base-class test (upstream skips it for IPNDM).
        pass

    def UpperCamelCase ( self , A__=0 , **A__ ) -> Dict:
        # Same save/reload round-trip as above, but with the default config.
        snake_case = dict(self.forward_default_kwargs )
        snake_case = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        snake_case = self.dummy_sample
        snake_case = 0.1 * sample
        snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
        for scheduler_class in self.scheduler_classes:
            snake_case = self.get_scheduler_config()
            snake_case = scheduler_class(**UpperCAmelCase_ )
            scheduler.set_timesteps(UpperCAmelCase_ )
            # copy over dummy past residuals (must be after setting timesteps)
            snake_case = dummy_past_residuals[:]
            if time_step is None:
                snake_case = scheduler.timesteps[len(scheduler.timesteps ) // 2]
            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(UpperCAmelCase_ )
                snake_case = scheduler_class.from_pretrained(UpperCAmelCase_ )
                # copy over dummy past residuals
                new_scheduler.set_timesteps(UpperCAmelCase_ )
                # copy over dummy past residual (must be after setting timesteps)
                snake_case = dummy_past_residuals[:]
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"

    def UpperCamelCase ( self , **A__ ) -> List[Any]:
        # Run a complete 10-step denoising loop (twice) and return the sample.
        snake_case = self.scheduler_classes[0]
        snake_case = self.get_scheduler_config(**UpperCAmelCase_ )
        snake_case = scheduler_class(**UpperCAmelCase_ )
        snake_case = 10
        snake_case = self.dummy_model()
        snake_case = self.dummy_sample_deter
        scheduler.set_timesteps(UpperCAmelCase_ )
        for i, t in enumerate(scheduler.timesteps ):
            snake_case = model(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        for i, t in enumerate(scheduler.timesteps ):
            snake_case = model(UpperCAmelCase_ , UpperCAmelCase_ )
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        return sample

    def UpperCamelCase ( self ) -> Optional[Any]:
        # Two consecutive steps at adjacent timesteps must preserve the
        # sample's shape and agree with each other in shape.
        snake_case = dict(self.forward_default_kwargs )
        snake_case = kwargs.pop('''num_inference_steps''' , UpperCAmelCase_ )
        for scheduler_class in self.scheduler_classes:
            snake_case = self.get_scheduler_config()
            snake_case = scheduler_class(**UpperCAmelCase_ )
            snake_case = self.dummy_sample
            snake_case = 0.1 * sample
            if num_inference_steps is not None and hasattr(UpperCAmelCase_ , '''set_timesteps''' ):
                scheduler.set_timesteps(UpperCAmelCase_ )
            elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , '''set_timesteps''' ):
                snake_case = num_inference_steps
            # copy over dummy past residuals (must be done after set_timesteps)
            snake_case = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
            snake_case = dummy_past_residuals[:]
            snake_case = scheduler.timesteps[5]
            snake_case = scheduler.timesteps[6]
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            snake_case = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
            self.assertEqual(output_a.shape , sample.shape )
            self.assertEqual(output_a.shape , output_a.shape )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Exercise config round-trips for several training-timestep counts.
        for timesteps in [1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=UpperCAmelCase_ , time_step=UpperCAmelCase_ )

    def UpperCamelCase ( self ) -> List[str]:
        # Exercise the forward pass for several inference-step counts.
        for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 1_00] ):
            self.check_over_forward(num_inference_steps=UpperCAmelCase_ , time_step=UpperCAmelCase_ )

    def UpperCamelCase ( self ) -> str:
        # Regression check on the mean magnitude of the full-loop output.
        snake_case = self.full_loop()
        snake_case = torch.mean(torch.abs(UpperCAmelCase_ ) )
        assert abs(result_mean.item() - 2_54_05_29 ) < 10
| 706
|
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class _lowercase ( __a , unittest.TestCase ):
    """Tokenizer test-suite for CodeGen (slow and fast tokenizers): BPE
    round-trips, slow/fast parity, padding behaviour, BOS-token handling and
    ``truncate_before_pattern`` decoding.

    NOTE(review): this block looks machine-mangled — all five class
    attributes are bound to the same name ``_UpperCAmelCase`` (each shadows
    the previous), several methods share the name ``UpperCamelCase`` (later
    defs shadow earlier ones), and many locals are bound to ``snake_case``
    but read back under other names (``self.vocab_file``, ``tokenizer``,
    ``out_s`` …), while ``A__`` is referenced in methods that do not declare
    it.  Upstream: transformers
    ``tests/models/codegen/test_tokenization_codegen.py``.
    """

    _UpperCAmelCase = CodeGenTokenizer  # presumably ``tokenizer_class`` — TODO confirm
    _UpperCAmelCase = CodeGenTokenizerFast  # presumably ``rust_tokenizer_class``; shadows above
    _UpperCAmelCase = True
    _UpperCAmelCase = {'''add_prefix_space''': True}
    _UpperCAmelCase = False

    def UpperCamelCase ( self ) -> Tuple:
        # Write a tiny BPE vocab/merges fixture into the test tmp dir.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        snake_case = [
            '''l''',
            '''o''',
            '''w''',
            '''e''',
            '''r''',
            '''s''',
            '''t''',
            '''i''',
            '''d''',
            '''n''',
            '''\u0120''',
            '''\u0120l''',
            '''\u0120n''',
            '''\u0120lo''',
            '''\u0120low''',
            '''er''',
            '''\u0120lowest''',
            '''\u0120newer''',
            '''\u0120wider''',
            '''<unk>''',
            '''<|endoftext|>''',
        ]
        snake_case = dict(zip(A__ , range(len(A__ ) ) ) )
        snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        snake_case = {'''unk_token''': '''<unk>'''}
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(A__ ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(A__ ) )

    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Factory for the slow (Python) tokenizer backed by the fixture files.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **A__ )

    def UpperCamelCase ( self , **A__ ) -> Union[str, Any]:
        # Factory for the fast (Rust) tokenizer backed by the fixture files.
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **A__ )

    def UpperCamelCase ( self , A__ ) -> Tuple:
        # Input/expected-output text pair used by common tokenizer tests.
        snake_case = '''lower newer'''
        snake_case = '''lower newer'''
        return input_text, output_text

    def UpperCamelCase ( self ) -> List[Any]:
        # BPE tokenization and token->id conversion against the tiny vocab.
        snake_case = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        snake_case = '''lower newer'''
        snake_case = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        self.assertListEqual(A__ , A__ )
        snake_case = tokens + [tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , A__ )

    def UpperCamelCase ( self ) -> Optional[int]:
        # Slow vs fast tokenizer parity on tokens, ids and unknown tokens.
        if not self.test_rust_tokenizer:
            return
        snake_case = self.get_tokenizer()
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = '''lower newer'''
        # Testing tokenization
        snake_case = tokenizer.tokenize(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.tokenize(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids without special tokens
        snake_case = tokenizer.encode(A__ , add_special_tokens=A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ , add_special_tokens=A__ )
        self.assertListEqual(A__ , A__ )
        # Testing conversion to ids with special tokens
        snake_case = self.get_rust_tokenizer(add_prefix_space=A__ )
        snake_case = tokenizer.encode(A__ , add_prefix_space=A__ )
        snake_case = rust_tokenizer.encode(A__ )
        self.assertListEqual(A__ , A__ )
        # Testing the unknown token
        snake_case = tokens + [rust_tokenizer.unk_token]
        snake_case = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A__ ) , A__ )

    def UpperCamelCase ( self , *A__ , **A__ ) -> List[str]:
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def UpperCamelCase ( self , A__=15 ) -> Tuple:
        # Padding without a pad token must raise for every encode entry point.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
                snake_case = self.rust_tokenizer_class.from_pretrained(A__ , **A__ )
                # Simple input
                snake_case = '''This is a simple input'''
                snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
                snake_case = ('''This is a simple input''', '''This is a pair''')
                snake_case = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Simple input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(A__ , tokenizer_r.encode_plus , A__ , max_length=A__ , padding='''max_length''' )
                # Pair input
                self.assertRaises(
                    A__ , tokenizer_r.batch_encode_plus , A__ , max_length=A__ , padding='''max_length''' , )

    def UpperCamelCase ( self ) -> Tuple:
        # Max-length and longest-sequence padding behaviour with a pad token.
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
        # Simple input
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input looooooooong''', '''This is a simple input''']
        snake_case = ('''This is a simple input''', '''This is a pair''')
        snake_case = [
            ('''This is a simple input loooooong''', '''This is a simple input'''),
            ('''This is a simple pair loooooong''', '''This is a simple pair'''),
        ]
        snake_case = tokenizer.pad_token_id
        snake_case = tokenizer(A__ , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        snake_case = tokenizer(*A__ , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
        snake_case = tokenizer(A__ , padding=A__ , truncate=A__ , return_tensors='''np''' )
        # s
        # test single string max_length padding
        self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
        self.assertTrue(pad_token_id in out_s['''input_ids'''] )
        self.assertTrue(0 in out_s['''attention_mask'''] )
        # s2
        # test automatic padding
        self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
        self.assertFalse(0 in out_sa['''attention_mask'''][0] )
        # short slice does have padding
        self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
        self.assertTrue(0 in out_sa['''attention_mask'''][1] )
        # p
        # test single pair max_length padding
        self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
        self.assertTrue(pad_token_id in out_p['''input_ids'''] )
        self.assertTrue(0 in out_p['''attention_mask'''] )
        # p2
        # test automatic padding pair
        self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
        self.assertFalse(0 in out_pa['''attention_mask'''][0] )
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
        self.assertTrue(0 in out_pa['''attention_mask'''][1] )

    def UpperCamelCase ( self ) -> str:
        # BOS-token insertion and decoding when add_bos_token is enabled.
        snake_case = '''$$$'''
        snake_case = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=A__ , add_bos_token=A__ )
        snake_case = '''This is a simple input'''
        snake_case = ['''This is a simple input 1''', '''This is a simple input 2''']
        snake_case = tokenizer.bos_token_id
        snake_case = tokenizer(A__ )
        snake_case = tokenizer(A__ )
        self.assertEqual(out_s.input_ids[0] , A__ )
        self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
        snake_case = tokenizer.decode(out_s.input_ids )
        snake_case = tokenizer.batch_decode(out_sa.input_ids )
        self.assertEqual(decode_s.split()[0] , A__ )
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )

    @slow
    def UpperCamelCase ( self ) -> Any:
        # Decoding with ``truncate_before_pattern`` strips trailing comments.
        snake_case = CodeGenTokenizer.from_pretrained('''Salesforce/codegen-350M-mono''' )
        snake_case = '''\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#'''
        snake_case = '''\nif len_a > len_b: result = a\nelse: result = b'''
        snake_case = tokenizer.encode(A__ )
        snake_case = ['''^#''', re.escape('''<|endoftext|>''' ), '''^\'\'\'''', '''^"""''', '''\n\n\n''']
        snake_case = tokenizer.decode(A__ , truncate_before_pattern=A__ )
        self.assertEqual(A__ , A__ )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Intentionally disabled common test (upstream skips it for CodeGen).
        pass
| 44
| 0
|
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
_lowercase = logging.getLogger(__name__)
class _lowercase ( UpperCAmelCase_ ):
    """Token-classification task reading CoNLL-style files: one token per
    line, the label taken from column ``label_idx`` (default: last column).

    NOTE(review): machine-mangled identifiers — locals are bound to
    ``snake_case`` but read back as ``mode``/``guid_index``/``words`` etc.,
    and ``_lowercase`` appears where the ``A__`` parameters were meant.
    Upstream: transformers legacy token-classification ``tasks.py`` (``NER``).
    """

    def __init__( self , A__=-1 ) -> Tuple:
        # in NER datasets, the last column is usually reserved for NER label
        snake_case = label_idx

    def UpperCamelCase ( self , A__ , A__ ) -> List[InputExample]:
        # Read ``{mode}.txt`` and split it into InputExamples at blank lines
        # and -DOCSTART- markers; missing labels default to "O".
        if isinstance(_lowercase , _lowercase ):
            snake_case = mode.value
        snake_case = os.path.join(_lowercase , F"""{mode}.txt""" )
        snake_case = 1
        snake_case = []
        with open(_lowercase , encoding='''utf-8''' ) as f:
            snake_case = []
            snake_case = []
            for line in f:
                if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowercase , labels=_lowercase ) )
                        guid_index += 1
                        snake_case = []
                        snake_case = []
                else:
                    snake_case = line.split(''' ''' )
                    words.append(splits[0] )
                    if len(_lowercase ) > 1:
                        labels.append(splits[self.label_idx].replace('''\n''' , '''''' ) )
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append('''O''' )
            if words:
                examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowercase , labels=_lowercase ) )
        return examples

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[str]:
        # Write predictions next to the original test lines, warning when a
        # truncated sequence has no prediction left to attach.
        snake_case = 0
        for line in test_input_reader:
            if line.startswith('''-DOCSTART-''' ) or line == "" or line == "\n":
                writer.write(_lowercase )
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                snake_case = line.split()[0] + ' ' + preds_list[example_id].pop(0 ) + '\n'
                writer.write(_lowercase )
            else:
                logger.warning('''Maximum sequence length exceeded: No prediction for \'%s\'.''' , line.split()[0] )

    def UpperCamelCase ( self , A__ ) -> List[str]:
        # Load the label list from ``path`` (ensuring "O" is present) or fall
        # back to the standard CoNLL-2003 NER label set.
        if path:
            with open(_lowercase , '''r''' ) as f:
                snake_case = f.read().splitlines()
            if "O" not in labels:
                snake_case = ['O'] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class _lowercase ( UpperCAmelCase_ ):
    """Chunking variant of the CoNLL task: the label lives in the
    second-to-last column of CoNLL-2000-style files.

    NOTE(review): same machine-mangled identifiers as the class above
    (``snake_case``/``_lowercase`` placeholders).
    """

    def __init__( self ) -> Tuple:
        # in CONLL2003 dataset chunk column is second-to-last
        super().__init__(label_idx=-2 )

    def UpperCamelCase ( self , A__ ) -> List[str]:
        # Load labels from ``path`` (ensuring "O" is present) or fall back to
        # the standard CoNLL-2000 chunking label set.
        if path:
            with open(_lowercase , '''r''' ) as f:
                snake_case = f.read().splitlines()
            if "O" not in labels:
                snake_case = ['O'] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]
class _lowercase ( UpperCAmelCase_ ):
    """Part-of-speech tagging task over CoNLL-U files parsed with ``conllu``.

    NOTE(review): same machine-mangled identifiers as the classes above
    (``snake_case``/``_lowercase`` placeholders where real names were meant).
    """

    def UpperCamelCase ( self , A__ , A__ ) -> List[InputExample]:
        # One InputExample per CoNLL-U sentence; labels come from the UPOS column.
        if isinstance(_lowercase , _lowercase ):
            snake_case = mode.value
        snake_case = os.path.join(_lowercase , F"""{mode}.txt""" )
        snake_case = 1
        snake_case = []
        with open(_lowercase , encoding='''utf-8''' ) as f:
            for sentence in parse_incr(_lowercase ):
                snake_case = []
                snake_case = []
                for token in sentence:
                    words.append(token['''form'''] )
                    labels.append(token['''upos'''] )
                assert len(_lowercase ) == len(_lowercase )
                if words:
                    examples.append(InputExample(guid=F"""{mode}-{guid_index}""" , words=_lowercase , labels=_lowercase ) )
                    guid_index += 1
        return examples

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
        # Write "form (gold|pred)" lines for every sentence in the test input.
        snake_case = 0
        for sentence in parse_incr(_lowercase ):
            snake_case = preds_list[example_id]
            snake_case = ''
            for token in sentence:
                out += F"""{token['form']} ({token['upos']}|{s_p.pop(0 )}) """
            out += "\n"
            writer.write(_lowercase )
            example_id += 1

    def UpperCamelCase ( self , A__ ) -> List[str]:
        # Labels from file, or the Universal Dependencies POS tag set.
        if path:
            with open(_lowercase , '''r''' ) as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
| 707
|
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class _lowercase :
    """Builds ViT configs and dummy inputs, and runs shape assertions, for the
    TF ViT model tests below.

    NOTE(review): machine-mangled — ``__init__`` declares every parameter as
    ``A__`` (duplicate argument names are a SyntaxError in Python) and binds
    values to the local name ``snake_case`` instead of ``self.<attr>``, while
    later methods read ``self.batch_size`` etc.  Upstream:
    ``tests/models/vit/test_modeling_tf_vit.py`` (``TFViTModelTester``).
    """

    def __init__( self , A__ , A__=13 , A__=30 , A__=2 , A__=3 , A__=True , A__=True , A__=32 , A__=2 , A__=4 , A__=37 , A__="gelu" , A__=0.1 , A__=0.1 , A__=10 , A__=0.0_2 , A__=3 , A__=None , ) -> List[Any]:
        snake_case = parent
        snake_case = batch_size
        snake_case = image_size
        snake_case = patch_size
        snake_case = num_channels
        snake_case = is_training
        snake_case = use_labels
        snake_case = hidden_size
        snake_case = num_hidden_layers
        snake_case = num_attention_heads
        snake_case = intermediate_size
        snake_case = hidden_act
        snake_case = hidden_dropout_prob
        snake_case = attention_probs_dropout_prob
        snake_case = type_sequence_label_size
        snake_case = initializer_range
        snake_case = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        snake_case = (image_size // patch_size) ** 2
        snake_case = num_patches + 1

    def UpperCamelCase ( self ) -> int:
        # Random pixel_values (and labels when use_labels) plus a fresh config.
        snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        snake_case = None
        if self.use_labels:
            snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        snake_case = self.get_config()
        return config, pixel_values, labels

    def UpperCamelCase ( self ) -> int:
        # ViTConfig mirroring the tester's hyper-parameters.
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A__ , initializer_range=self.initializer_range , )

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        # Base-model forward: checks hidden-state shape, including position
        # embedding interpolation for a smaller input image.
        snake_case = TFViTModel(config=A__ )
        snake_case = model(A__ , training=A__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        snake_case = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Optional[int]:
        # Classification-head forward: logits shape, position-embedding
        # interpolation, and a greyscale (1-channel) variant.
        snake_case = self.type_sequence_label_size
        snake_case = TFViTForImageClassification(A__ )
        snake_case = model(A__ , labels=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        snake_case = self.image_size // 2
        snake_case = pixel_values[:, :, :image_size, :image_size]
        snake_case = model(A__ , interpolate_pos_encoding=A__ , training=A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        snake_case = 1
        snake_case = TFViTForImageClassification(A__ )
        snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        snake_case = model(A__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Pack config and pixel_values for the common test mixin.
        snake_case = self.prepare_config_and_inputs()
        snake_case , snake_case , snake_case = config_and_inputs
        snake_case = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_tf
class _lowercase ( __a , __a , unittest.TestCase ):
    """Common-mixin test-suite for TF ViT (base model + image-classification
    head): config tests, embedding-layer checks, forward-signature checks and
    the shape assertions from the tester class above.

    NOTE(review): machine-mangled — all five class attributes are bound to
    ``_UpperCAmelCase`` (each shadows the previous) and several methods share
    the name ``UpperCamelCase``; compare with upstream ``TFViTModelTest``.
    """

    _UpperCAmelCase = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    _UpperCAmelCase = (
        {'''feature-extraction''': TFViTModel, '''image-classification''': TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    _UpperCAmelCase = False
    _UpperCAmelCase = False
    _UpperCAmelCase = False

    def UpperCamelCase ( self ) -> List[Any]:
        # NOTE(review): ``TFViTModelTester`` is referenced here but the tester
        # class above is named ``_lowercase`` — looks like the original name.
        snake_case = TFViTModelTester(self )
        snake_case = ConfigTester(self , config_class=A__ , has_text_modality=A__ , hidden_size=37 )

    def UpperCamelCase ( self ) -> int:
        self.config_tester.run_common_tests()

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> int:
        pass

    @unittest.skip(reason='''ViT does not use inputs_embeds''' )
    def UpperCamelCase ( self ) -> str:
        pass

    def UpperCamelCase ( self ) -> Union[str, Any]:
        # Input embeddings must be a keras layer; ViT has no output embeddings.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            snake_case = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(A__ , tf.keras.layers.Layer ) )

    def UpperCamelCase ( self ) -> List[Any]:
        # First positional argument of ``call`` must be ``pixel_values``.
        snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            snake_case = model_class(A__ )
            snake_case = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            snake_case = [*signature.parameters.keys()]
            snake_case = ['''pixel_values''']
            self.assertListEqual(arg_names[:1] , A__ )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A__ )

    def UpperCamelCase ( self ) -> Optional[Any]:
        snake_case = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*A__ )

    @slow
    def UpperCamelCase ( self ) -> Any:
        # Smoke test: the pretrained checkpoint loads.
        snake_case = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
        self.assertIsNotNone(A__ )
def __UpperCamelCase ( ) ->Any:
    """Load the COCO cats fixture image used by the integration test below."""
    fixture_path = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
    return Image.open(fixture_path )
@require_tf
@require_vision
class _lowercase ( unittest.TestCase ):
    """Slow integration test: run the pretrained TF ViT classifier on the COCO
    cats fixture and compare the first logits against recorded values.

    NOTE(review): machine-mangled identifiers, as elsewhere in this file —
    the property below is presumably ``default_image_processor`` upstream
    (the test reads ``self.default_image_processor``).
    """

    @cached_property
    def UpperCamelCase ( self ) -> Optional[int]:
        # Image processor matching the checkpoint; None when vision is absent.
        return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None

    @slow
    def UpperCamelCase ( self ) -> Dict:
        snake_case = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
        snake_case = self.default_image_processor
        snake_case = prepare_img()
        snake_case = image_processor(images=A__ , return_tensors='''tf''' )
        # forward pass
        snake_case = model(**A__ )
        # verify the logits
        snake_case = tf.TensorShape((1, 10_00) )
        self.assertEqual(outputs.logits.shape , A__ )
        snake_case = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , A__ , atol=1e-4 )
| 44
| 0
|
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
# BUG FIX: the three module constants below were all bound to the same
# throwaway name ``_lowercase`` (each shadowing the previous one), while the
# conversion code reads them as ``logger`` and ``MAPPING`` (NameError at
# runtime).  Restoring the real names makes those references resolve.
logger = logging.get_logger(__name__)

# fairseq WavLM parameter-name fragments -> HF WavLM parameter names.
# A "*" is replaced by the encoder layer index during conversion.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Weights that live at the top level of the HF model rather than under the
# base ``wavlm.`` prefix (used by the upstream script for fine-tuned heads).
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` (a fairseq tensor) to the attribute of ``hf_pointer``
    addressed by the dotted path ``key``, on the sub-tensor ``weight_type``.

    BUG FIX: the original definition was named ``__UpperCamelCase`` with all
    five parameters called ``a`` (duplicate argument names — a SyntaxError),
    while the caller below invokes ``set_recursively(...)``; name and
    parameters are restored from the call site and the f-strings in the body.

    Args:
        hf_pointer: root HF module to descend into.
        key: dotted attribute path, e.g. ``"encoder.layers.3.fc1"``.
        value: tensor to copy in (shape-checked against the target).
        full_name: original fairseq parameter name (used for logging only).
        weight_type: one of ``"weight"``, ``"weight_g"``, ``"weight_v"``,
            ``"bias"`` or ``None`` (assign to the resolved object itself).
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights(fairseq_model, hf_model):
    """Copy every weight of a fairseq WavLM model into the HF model.

    BUG FIX: originally defined as ``__UpperCamelCase`` with both parameters
    named ``a`` (a SyntaxError) and locals bound to ``snake_case`` but read
    under other names; identifiers restored from the call site in
    ``convert_wavlm_checkpoint`` and the body's own references.

    Conv feature-extractor weights are routed through ``load_conv_layer``;
    everything else is matched against ``MAPPING`` and assigned with
    ``set_recursively``.  Unmatched fairseq weights are collected and logged
    as a warning.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # the wildcard stands for the encoder layer index
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq conv-feature-extractor weight into the HF extractor.

    BUG FIX: originally defined as ``__UpperCamelCase`` with all five
    parameters named ``a`` (a SyntaxError); name and parameters are restored
    from the call site in ``recursively_load_weights``.

    ``full_name`` looks like ``...conv_layers.<layer_id>.<type_id>...``:
    type_id 0 addresses the conv weight/bias; type_id 2 addresses the layer
    norm (only for the norm layout implied by ``use_group_norm``).  Anything
    else is recorded in ``unused_weights``.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF ``WavLMModel`` directory.

    BUG FIX: originally named ``__UpperCamelCase`` — the ``if __name__``
    block below calls ``convert_wavlm_checkpoint``, which was undefined —
    and locals were bound to ``snake_case`` but read under other names.
    Identifiers restored from the call site and the body's own references.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for ``save_pretrained``.
        config_path: optional HF ``config.json`` to use instead of defaults.
    """
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['''cfg'''] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['''model'''] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    # CLI entry point: convert --checkpoint_path into --pytorch_dump_folder_path.
    # NOTE(review): the parser/args bindings are mangled to ``_lowercase`` while
    # the following lines read ``parser``/``args`` -- verify before running.
    _lowercase = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    _lowercase = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 708
|
'''simple docstring'''
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
# Dataset/config pairs that are mirrored on the HF Google Cloud Storage bucket
# and exercised by the tests below (read there as ``DATASETS_ON_HF_GCP``).
_lowercase = [
    {'dataset': 'wikipedia', 'config_name': '20220301.de'},
    {'dataset': 'wikipedia', 'config_name': '20220301.en'},
    {'dataset': 'wikipedia', 'config_name': '20220301.fr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.frr'},
    {'dataset': 'wikipedia', 'config_name': '20220301.it'},
    {'dataset': 'wikipedia', 'config_name': '20220301.simple'},
    {'dataset': 'snli', 'config_name': 'plain_text'},
    {'dataset': 'eli5', 'config_name': 'LFQA_reddit'},
    {'dataset': 'wiki40b', 'config_name': 'en'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.compressed'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.nq.no_index'},
    {'dataset': 'wiki_dpr', 'config_name': 'psgs_w100.multiset.no_index'},
    {'dataset': 'natural_questions', 'config_name': 'default'},
]
def __UpperCamelCase ( a : Dict=True ) ->str:
    # Builds ``parameterized`` test-case dicts from the dataset list above.
    # NOTE(review): the body reads ``with_config`` and ``DATASETS_ON_HF_GCP``
    # though the parameter is ``a`` and the list is bound to ``_lowercase``
    # (mangled names) -- verify against the original test module.
    if with_config:
        # One test case per (dataset, config) pair.
        return [
            {
                "testcase_name": d["dataset"] + "/" + d["config_name"],
                "dataset": d["dataset"],
                "config_name": d["config_name"],
            }
            for d in DATASETS_ON_HF_GCP
        ]
    else:
        # Without configs, de-duplicate dataset names via a set comprehension.
        return [
            {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
        ]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=__a ) )
class _lowercase ( __a ):
    # Parameterized TestCase: for each named dataset/config, check that its
    # dataset_info.json can be fetched from the HF GCS mirror.
    _UpperCAmelCase = None  # dataset name, injected by @parameterized
    _UpperCAmelCase = None  # config name, injected by @parameterized
    def UpperCamelCase ( self , A__ , A__ ) -> str:
        # Downloads the builder's dataset_info.json into a temp cache and
        # asserts the cached file exists on disk.
        with TemporaryDirectory() as tmp_dir:
            snake_case = dataset_module_factory(A__ , cache_dir=A__ )
            snake_case = import_main_class(dataset_module.module_path , dataset=A__ )
            snake_case = builder_cls(
                cache_dir=A__ , config_name=A__ , hash=dataset_module.hash , )
            # Reconstruct the GCS URL of the builder's dataset_info.json
            # (always use '/' separators regardless of the local OS).
            snake_case = '''/'''.join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=A__ ).replace(os.sep , '''/''' ),
                    config.DATASET_INFO_FILENAME,
                ] )
            snake_case = cached_path(A__ , cache_dir=A__ )
            self.assertTrue(os.path.exists(A__ ) )
@pytest.mark.integration
def __UpperCamelCase ( a : List[str] ) ->Any:
    # Integration test: build wikipedia/20220301.frr from the HF GCS mirror
    # instead of running the apache-beam based download_and_prepare.
    # NOTE(review): body reads ``tmp_path_factory`` though the parameter is
    # ``a`` (mangled names).
    snake_case = tmp_path_factory.mktemp('''test_hf_gcp''' ) / '''test_wikipedia_simple'''
    snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a )
    snake_case = import_main_class(dataset_module.module_path )
    snake_case = builder_cls(
        cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    snake_case = None
    builder_instance.download_and_prepare()
    snake_case = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def __UpperCamelCase ( a : Any ) ->Union[str, Any]:
    # Integration test: the same dataset must also be loadable in streaming
    # mode, yielding an IterableDatasetDict with a readable "train" split.
    snake_case = dataset_module_factory('''wikipedia''' , cache_dir=a )
    snake_case = import_main_class(dataset_module.module_path , dataset=a )
    snake_case = builder_cls(
        cache_dir=a , config_name='''20220301.frr''' , hash=dataset_module.hash , )
    snake_case = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(a , a )
    assert "train" in ds
    assert isinstance(ds['''train'''] , a )
    # The stream must yield at least one example.
    assert next(iter(ds['''train'''] ) )
| 44
| 0
|
'''simple docstring'''
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
# Sampling hyper-parameters for the value-guided diffusion planner.
_lowercase = {
    'n_samples': 64,
    'horizon': 32,
    'num_inference_steps': 20,
    'n_guide_steps': 2, # can set to 0 for faster sampling, does not use value network
    'scale_grad_by_std': True,
    'scale': 0.1,
    'eta': 0.0,
    't_grad_cutoff': 2,
    'device': 'cpu',
}
if __name__ == "__main__":
    # Roll out the pretrained hopper value-guided policy for up to T steps,
    # accumulating reward/score and recording observations for rendering.
    # NOTE(review): every binding below is mangled to ``_lowercase`` while later
    # lines read names such as ``env``/``obs``/``pipeline``/``T`` -- verify
    # against the original diffusers example before running.
    _lowercase = 'hopper-medium-v2'
    _lowercase = gym.make(env_name)
    _lowercase = ValueGuidedRLPipeline.from_pretrained(
        'bglick13/hopper-medium-v2-value-function-hor32',
        env=env,
    )
    env.seed(0)
    _lowercase = env.reset()
    _lowercase = 0
    _lowercase = 0
    _lowercase = 1_000
    _lowercase = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            _lowercase = pipeline(obs, planning_horizon=32)
            # execute action in environment
            _lowercase = env.step(denorm_actions)
            _lowercase = env.get_normalized_score(total_reward)
            # update return
            total_reward += reward
            total_score += score
            print(
                f'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
                f' {total_score}'
            )
            # save observations for rendering
            rollout.append(next_observation.copy())
            _lowercase = next_observation
    except KeyboardInterrupt:
        # Allow Ctrl-C to stop the rollout early but still print the total.
        pass
    print(f'Total reward: {total_reward}')
| 709
|
'''simple docstring'''
def __UpperCamelCase ( a : int , a : int ) ->int:
while b:
snake_case , snake_case = b, a % b
return a
def __UpperCamelCase ( a : int , a : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(a , a % b )
def __UpperCamelCase ( ) ->Optional[Any]:
    # Demo driver: prints gcd results for sample pairs using both the
    # iterative and the recursive implementation.
    # NOTE(review): ``euclidean_gcd``/``euclidean_gcd_recursive`` (and ``main``
    # below) are not defined under those names in this name-mangled file.
    print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
    print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
    print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
    print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
    print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
    print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
    print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
    print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
    main()
| 44
| 0
|
'''simple docstring'''
from __future__ import annotations
def __UpperCamelCase ( a : Dict ) ->float:
if not nums:
raise ValueError('''List is empty''' )
return sum(lowerCamelCase__ ) / len(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710
|
'''simple docstring'''
import argparse
import copy
def __UpperCamelCase ( a : Union[str, Any] ) ->Tuple:
    # Parses a TSP edge-list file (lines of ``node_a node_b distance``) into an
    # adjacency dict mapping each node to a list of [neighbour, distance]
    # pairs; every edge is inserted in both directions.
    # NOTE(review): local bindings are mangled to ``snake_case`` while later
    # lines read ``dict_of_neighbours``/``_list`` -- verify against the
    # original tabu-search script.
    snake_case = {}
    with open(a ) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                snake_case = []
                _list.append([line.split()[1], line.split()[2]] )
                snake_case = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                snake_case = []
                _list.append([line.split()[0], line.split()[2]] )
                snake_case = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def __UpperCamelCase ( a : Dict , a : Tuple ) ->int:
    # Builds a greedy nearest-neighbour first tour and its total distance.
    # NOTE(review): locals are mangled to ``snake_case``; names read below
    # follow the original algorithm (start_node, first_solution, visiting, ...).
    with open(a ) as f:
        snake_case = f.read(1 )  # first character of the file is the start node
    snake_case = start_node
    snake_case = []
    snake_case = start_node
    snake_case = 0
    while visiting not in first_solution:
        snake_case = 1_0000  # sentinel "infinite" distance
        for k in dict_of_neighbours[visiting]:
            # Pick the nearest not-yet-visited neighbour.
            if int(k[1] ) < int(a ) and k[0] not in first_solution:
                snake_case = k[1]
                snake_case = k[0]
        first_solution.append(a )
        snake_case = distance_of_first_solution + int(a )
        snake_case = best_node
    first_solution.append(a )
    # Close the tour: locate the edge back to the start node and replace the
    # sentinel distance added on the final iteration.
    snake_case = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    snake_case = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1] )
        - 1_0000
    )
    return first_solution, distance_of_first_solution
def __UpperCamelCase ( a : Optional[int] , a : str ) ->str:
    # Generates the 2-swap neighbourhood of a tour: every pair of interior
    # nodes is exchanged, each candidate tour is re-costed, and the list is
    # returned sorted by total distance (appended as the tour's last element).
    # NOTE(review): locals are mangled to ``snake_case``; the sort key reads
    # ``x[...]`` though the lambda parameter is ``a``.
    snake_case = []
    for n in solution[1:-1]:
        snake_case = solution.index(a )
        for kn in solution[1:-1]:
            snake_case = solution.index(a )
            if n == kn:
                continue
            # Swap nodes n and kn in a deep copy so the original tour survives.
            snake_case = copy.deepcopy(a )
            snake_case = kn
            snake_case = n
            snake_case = 0
            # Re-compute the candidate tour's total distance edge by edge.
            for k in _tmp[:-1]:
                snake_case = _tmp[_tmp.index(a ) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        snake_case = distance + int(i[1] )
            _tmp.append(a )
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp )
    snake_case = len(neighborhood_of_solution[0] ) - 1
    neighborhood_of_solution.sort(key=lambda a : x[index_of_last_item_in_the_list] )
    return neighborhood_of_solution
def __UpperCamelCase ( a : Any , a : Optional[Any] , a : int , a : Optional[int] , a : Union[str, Any] ) ->List[Any]:
    # Tabu search over the 2-swap neighbourhood: for ``iters`` iterations pick
    # the best non-tabu neighbour, record the exchanged node pair in a
    # bounded tabu list, and keep the best tour ever seen.
    # NOTE(review): parameters and locals are mangled; names read below follow
    # the original (first_solution, distance_of_first_solution, iters, size).
    snake_case = 1
    snake_case = first_solution
    snake_case = []
    snake_case = distance_of_first_solution
    snake_case = solution
    while count <= iters:
        snake_case = find_neighborhood(a , a )
        snake_case = 0
        snake_case = neighborhood[index_of_best_solution]
        snake_case = len(a ) - 1  # last element of a candidate is its cost
        snake_case = False
        while not found:
            # Identify the first position where the candidate differs from the
            # current tour -- that pair is the exchange defining the move.
            snake_case = 0
            while i < len(a ):
                if best_solution[i] != solution[i]:
                    snake_case = best_solution[i]
                    snake_case = solution[i]
                    break
                snake_case = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                # Move is not tabu (in either orientation): accept it.
                tabu_list.append([first_exchange_node, second_exchange_node] )
                snake_case = True
                snake_case = best_solution[:-1]
                snake_case = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    snake_case = cost
                    snake_case = solution
            else:
                # Move is tabu: fall through to the next-best neighbour.
                snake_case = index_of_best_solution + 1
                snake_case = neighborhood[index_of_best_solution]
        if len(a ) >= size:
            # Bounded tabu list: evict the oldest move.
            tabu_list.pop(0 )
        snake_case = count + 1
    return best_solution_ever, best_cost
def __UpperCamelCase ( a : Union[str, Any]=None ) ->Optional[Any]:
    # Driver: parse the edge file, build the greedy first tour, then improve it
    # with tabu search and print the result.
    # NOTE(review): reads ``args.File``/``args.Iterations``/``args.Size``
    # although the parameter is ``a`` (mangled); see the argparse block below.
    snake_case = generate_neighbours(args.File )
    snake_case , snake_case = generate_first_solution(
        args.File , a )
    snake_case , snake_case = tabu_search(
        a , a , a , args.Iterations , args.Size , )
    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""" )
if __name__ == "__main__":
    # CLI: -f/--File edge list, -i/--Iterations search length, -s/--Size tabu
    # list capacity. NOTE(review): the parser is bound to ``_lowercase`` but
    # used as ``parser`` (mangled names).
    _lowercase = argparse.ArgumentParser(description='Tabu Search')
    parser.add_argument(
        '-f',
        '--File',
        type=str,
        help='Path to the file containing the data',
        required=True,
    )
    parser.add_argument(
        '-i',
        '--Iterations',
        type=int,
        help='How many iterations the algorithm should perform',
        required=True,
    )
    parser.add_argument(
        '-s', '--Size', type=int, help='Size of the tabu list', required=True
    )
    # Pass the arguments to main method
    main(parser.parse_args())
| 44
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
# Lazy-import structure for the TrOCR sub-package: configuration/processing
# are always available; modeling objects only when torch is installed.
# NOTE(review): both the structure dict and the modeling-name list are bound
# to ``_lowercase`` (mangled) while ``_LazyModule`` below expects
# ``_import_structure`` -- verify against the original __init__.py.
_lowercase = {
    'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
    'processing_trocr': ['TrOCRProcessor'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # torch missing: simply omit the modeling entries.
    pass
else:
    _lowercase = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy.
    import sys
    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 711
|
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
# Conditional exports for the spectrogram-diffusion pipeline: the core
# objects need transformers + torch; the MIDI utilities additionally need
# note_seq. Missing dependencies fall back to dummy-object modules.
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        TaFilmDecoder,
    )
try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
    from .midi_utils import MidiProcessor
| 44
| 0
|
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def __UpperCamelCase ( a : str ) ->List[str]:
    # Builds a Swin2SR config whose fields depend on which checkpoint URL is
    # being converted (upscale factor, image size, upsampler variant, ...).
    # NOTE(review): every assignment is mangled to the same name ``snake_case``
    # and the parameter is read as ``checkpoint_url`` -- the intended config
    # attributes must be recovered from the original conversion script.
    snake_case = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        snake_case = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        snake_case = 4
        snake_case = 48
        snake_case = """pixelshuffle_aux"""
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        snake_case = [6, 6, 6, 6]
        snake_case = 60
        snake_case = [6, 6, 6, 6]
        snake_case = """pixelshuffledirect"""
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        snake_case = 4
        snake_case = """nearest+conv"""
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        # JPEG artifact-reduction variant: grayscale input, no upscaling.
        snake_case = 1
        snake_case = 1
        snake_case = 126
        snake_case = 7
        snake_case = 255.0
        snake_case = """"""
    return config
def __UpperCamelCase ( a : List[str] , a : Optional[Any] ) ->int:
    # Maps one original Swin2SR state-dict key onto the HF naming scheme via a
    # chain of substring replacements.
    # NOTE(review): replacements are bound to ``snake_case`` instead of ``name``
    # (mangled), so as written the input would be returned unchanged -- verify
    # against the original conversion script.
    if "patch_embed.proj" in name and "layers" not in name:
        snake_case = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        snake_case = name.replace('''patch_embed.norm''' , '''embeddings.patch_embeddings.layernorm''' )
    if "layers" in name:
        snake_case = name.replace('''layers''' , '''encoder.stages''' )
    if "residual_group.blocks" in name:
        snake_case = name.replace('''residual_group.blocks''' , '''layers''' )
    if "attn.proj" in name:
        snake_case = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        snake_case = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        snake_case = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        snake_case = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        snake_case = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        snake_case = name.replace('''mlp.fc2''' , '''output.dense''' )
    if "q_bias" in name:
        snake_case = name.replace('''q_bias''' , '''query.bias''' )
    if "k_bias" in name:
        snake_case = name.replace('''k_bias''' , '''key.bias''' )
    if "v_bias" in name:
        snake_case = name.replace('''v_bias''' , '''value.bias''' )
    if "cpb_mlp" in name:
        snake_case = name.replace('''cpb_mlp''' , '''continuous_position_bias_mlp''' )
    if "patch_embed.proj" in name:
        snake_case = name.replace('''patch_embed.proj''' , '''patch_embed.projection''' )
    if name == "norm.weight":
        snake_case = """layernorm.weight"""
    if name == "norm.bias":
        snake_case = """layernorm.bias"""
    if "conv_first" in name:
        snake_case = name.replace('''conv_first''' , '''first_convolution''' )
    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            snake_case = name.replace('''conv_last''' , '''final_convolution''' )
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                snake_case = name.replace('''conv_before_upsample.0''' , '''conv_before_upsample''' )
            if "upsample.0" in name:
                snake_case = name.replace('''upsample.0''' , '''upsample.convolution_0''' )
            if "upsample.2" in name:
                snake_case = name.replace('''upsample.2''' , '''upsample.convolution_1''' )
            snake_case = """upsample.""" + name
        elif config.upsampler == "pixelshuffledirect":
            snake_case = name.replace('''upsample.0.weight''' , '''upsample.conv.weight''' )
            snake_case = name.replace('''upsample.0.bias''' , '''upsample.conv.bias''' )
        else:
            pass
    else:
        # Everything outside the heads lives under the "swin2sr." prefix.
        snake_case = """swin2sr.""" + name
    return name
def __UpperCamelCase ( a : Optional[int] , a : Any ) ->Tuple:
    # Rewrites an original Swin2SR state dict in place: fused "qkv" tensors are
    # split into separate query/key/value slices; other keys pass through.
    # NOTE(review): locals are mangled to ``snake_case``; names read below
    # (key_split, dim, val, ...) follow the original conversion script.
    for key in orig_state_dict.copy().keys():
        snake_case = orig_state_dict.pop(__lowerCAmelCase )
        if "qkv" in key:
            snake_case = key.split('''.''' )
            snake_case = int(key_split[1] )
            snake_case = int(key_split[4] )
            snake_case = config.embed_dim
            if "weight" in key:
                # qkv weight is stacked along dim 0: [q; k; v].
                snake_case = val[:dim, :]
                snake_case = val[dim : dim * 2, :]
                snake_case = val[-dim:, :]
            else:
                snake_case = val[:dim]
                snake_case = val[dim : dim * 2]
                snake_case = val[-dim:]
            pass
        else:
            snake_case = val
    return orig_state_dict
def __UpperCamelCase ( a : Optional[Any] , a : Optional[Any] , a : List[Any] ) ->Optional[Any]:
    # End-to-end conversion of a Swin2SR checkpoint: load + rename weights,
    # verify a forward pass against hard-coded expected slices, then save and
    # optionally push to the hub.
    # NOTE(review): parameters are mangled to ``a``; the body expects
    # (checkpoint_url, pytorch_dump_folder_path, push_to_hub) and all locals
    # are bound to ``snake_case`` -- verify against the original script.
    snake_case = get_config(__lowerCAmelCase )
    snake_case = SwinaSRForImageSuperResolution(__lowerCAmelCase )
    model.eval()
    snake_case = torch.hub.load_state_dict_from_url(__lowerCAmelCase , map_location='''cpu''' )
    snake_case = convert_state_dict(__lowerCAmelCase , __lowerCAmelCase )
    snake_case = model.load_state_dict(__lowerCAmelCase , strict=__lowerCAmelCase )
    if len(__lowerCAmelCase ) > 0:
        raise ValueError('''Missing keys when converting: {}'''.format(__lowerCAmelCase ) )
    for key in unexpected_keys:
        # Position/mask buffers are recomputed by HF and may safely be dropped.
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""" )
    # verify values
    snake_case = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
    snake_case = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw ).convert('''RGB''' )
    snake_case = SwinaSRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values
    snake_case = 126 if """Jpeg""" in checkpoint_url else 256
    snake_case = Compose(
        [
            Resize((image_size, image_size) ),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    snake_case = transforms(__lowerCAmelCase ).unsqueeze(0 )
    if config.num_channels == 1:
        # Grayscale variant: keep a single channel.
        snake_case = pixel_values[:, 0, :, :].unsqueeze(1 )
    snake_case = model(__lowerCAmelCase )
    # assert values
    if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
        snake_case = torch.Size([1, 3, 512, 512] )
        snake_case = torch.tensor(
            [[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
    elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        snake_case = torch.Size([1, 3, 1024, 1024] )
        snake_case = torch.tensor(
            [[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        # TODO values didn't match exactly here
        snake_case = torch.Size([1, 3, 1024, 1024] )
        snake_case = torch.tensor(
            [[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        snake_case = torch.Size([1, 3, 512, 512] )
        snake_case = torch.tensor(
            [[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        snake_case = torch.Size([1, 3, 1024, 1024] )
        snake_case = torch.tensor(
            [[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
    assert (
        outputs.reconstruction.shape == expected_shape
    ), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
    assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __lowerCAmelCase , atol=1e-3 )
    print('''Looks ok!''' )
    # Map each known checkpoint URL to its hub model name.
    snake_case = {
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
            """swin2SR-classical-sr-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
            """swin2SR-classical-sr-x4-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
            """swin2SR-compressed-sr-x4-48"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
            """swin2SR-lightweight-x2-64"""
        ),
        """https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
            """swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
        ),
    }
    snake_case = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(__lowerCAmelCase )
        print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(__lowerCAmelCase )
    if push_to_hub:
        model.push_to_hub(f"""caidas/{model_name}""" )
        processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
    # CLI: convert --checkpoint_url into --pytorch_dump_folder_path, optionally
    # pushing the result to the hub.
    # NOTE(review): the parser is bound to ``_lowercase`` but used as
    # ``parser`` (mangled names).
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--checkpoint_url',
        default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
        type=str,
        help='URL of the original Swin2SR checkpoint you\'d like to convert.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    _lowercase = parser.parse_args()
    convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 712
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class _lowercase ( __a ):
    """Whisper processor wrapping a feature extractor (audio) and a tokenizer
    (text) behind a single ``__call__``.

    NOTE(review): several signatures are mangled -- ``__call__`` declares
    ``*A__, **A__`` (duplicate names, a SyntaxError as-is) and the body reads
    ``args``/``kwargs``/``audio``/``text`` -- verify against the original
    transformers source.
    """
    _UpperCAmelCase = '''WhisperFeatureExtractor'''
    _UpperCAmelCase = '''WhisperTokenizer'''
    def __init__( self , A__ , A__ ) -> Optional[Any]:
        super().__init__(A__ , A__ )
        # Default processor is the feature extractor until a target-context
        # manager switches it.
        snake_case = self.feature_extractor
        snake_case = False
    def UpperCamelCase ( self , A__=None , A__=None , A__=True ) -> Union[str, Any]:
        # Delegates decoder prompt-id construction to the tokenizer.
        return self.tokenizer.get_decoder_prompt_ids(task=A__ , language=A__ , no_timestamps=A__ )
    def __call__( self , *A__ , **A__ ) -> Dict:
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*A__ , **A__ )
        snake_case = kwargs.pop('''audio''' , A__ )
        snake_case = kwargs.pop('''sampling_rate''' , A__ )
        snake_case = kwargs.pop('''text''' , A__ )
        if len(A__ ) > 0:
            # Positional style: first arg is audio, the rest are forwarded.
            snake_case = args[0]
            snake_case = args[1:]
        if audio is None and text is None:
            raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
        if audio is not None:
            snake_case = self.feature_extractor(A__ , *A__ , sampling_rate=A__ , **A__ )
        if text is not None:
            snake_case = self.tokenizer(A__ , **A__ )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            # Both given: attach the token ids as labels on the audio features.
            snake_case = encodings['''input_ids''']
            return inputs
    def UpperCamelCase ( self , *A__ , **A__ ) -> Optional[Any]:
        # Forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*A__ , **A__ )
    def UpperCamelCase ( self , *A__ , **A__ ) -> str:
        # Forward to the tokenizer's decode.
        return self.tokenizer.decode(*A__ , **A__ )
    def UpperCamelCase ( self , A__ , A__="np" ) -> Optional[Any]:
        # Forward to the tokenizer's prompt-id helper.
        return self.tokenizer.get_prompt_ids(A__ , return_tensors=A__ )
| 44
| 0
|
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class _lowercase :
    """Node of a segment tree: covers the inclusive index range
    [start, end], stores the aggregated ``val`` for that range, and links to
    its left/right children (both None for a leaf)."""
    def __init__( self , A__ , A__ , A__ , A__=None , A__=None ) -> List[str]:
        # NOTE(review): parameters are mangled to repeated ``A__`` (a
        # SyntaxError as-is); intended order is (start, end, val, left, right).
        snake_case = start
        snake_case = end
        snake_case = val
        # Midpoint used by the tree to route queries/updates to a child.
        snake_case = (start + end) // 2
        snake_case = left
        snake_case = right
    def __repr__( self ) -> List[str]:
        return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"""
class _lowercase :
    """Segment tree over ``collection`` with an arbitrary associative
    aggregation ``function`` (e.g. add/max/min), supporting point update and
    range query, each in O(log n).

    NOTE(review): parameter names are mangled to repeated ``A__``; intended
    names are recovered from the body (start, end, i, j, val, ...).
    """
    def __init__( self , A__ , A__ ) -> Optional[int]:
        snake_case = collection
        snake_case = function
        if self.collection:
            # Only build the tree for a non-empty collection.
            snake_case = self._build_tree(0 , len(A__ ) - 1 )
    def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]:
        # Point update: set collection[i] = val.
        self._update_tree(self.root , A__ , A__ )
    def UpperCamelCase ( self , A__ , A__ ) -> Any:
        # Range query over the inclusive interval [i, j].
        return self._query_range(self.root , A__ , A__ )
    def UpperCamelCase ( self , A__ , A__ ) -> Optional[int]:
        # Recursively builds the subtree for [start, end].
        if start == end:
            return SegmentTreeNode(A__ , A__ , self.collection[start] )
        snake_case = (start + end) // 2
        snake_case = self._build_tree(A__ , A__ )
        snake_case = self._build_tree(mid + 1 , A__ )
        return SegmentTreeNode(A__ , A__ , self.fn(left.val , right.val ) , A__ , A__ )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Tuple:
        # Descends to the leaf for index i, then re-aggregates on the way up.
        if node.start == i and node.end == i:
            snake_case = val
            return
        if i <= node.mid:
            self._update_tree(node.left , A__ , A__ )
        else:
            self._update_tree(node.right , A__ , A__ )
        snake_case = self.fn(node.left.val , node.right.val )
    def UpperCamelCase ( self , A__ , A__ , A__ ) -> List[Any]:
        # Answers the query for [i, j] within this node's range.
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left , A__ , A__ )
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left , A__ , node.mid ) , self._query_range(node.right , node.mid + 1 , A__ ) , )
        else:
            # range in right child tree
            return self._query_range(node.right , A__ , A__ )
    def UpperCamelCase ( self ) -> Optional[int]:
        # Breadth-first traversal yielding every node (empty tree yields none).
        if self.root is not None:
            snake_case = Queue()
            queue.put(self.root )
            while not queue.empty():
                snake_case = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left )
                if node.right is not None:
                    queue.put(node.right )
if __name__ == "__main__":
    # Demo: build the tree under add/max/min, print it before and after a
    # point update, then show a few range queries.
    # NOTE(review): the tree is bound to ``_lowercase`` but used as ``arr``
    # (mangled names).
    import operator
    for fn in [operator.add, max, min]:
        print('*' * 50)
        _lowercase = SegmentTree([2, 1, 5, 3, 4], fn)
        for node in arr.traverse():
            print(node)
        print()
        arr.update(1, 5)
        for node in arr.traverse():
            print(node)
        print()
        print(arr.query_range(3, 4)) # 7
        print(arr.query_range(2, 2)) # 5
        print(arr.query_range(1, 3)) # 13
        print()
| 713
|
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _lowercase ( __a ):
    # Enumeration of the three decoding strategies used by the MGP-STR
    # processor below: character-level, BPE, and wordpiece.
    # NOTE(review): all three attributes are mangled to ``_UpperCAmelCase`` and
    # the tuple below reads ``DecodeType.CHARACTER``/``BPE``/``WORDPIECE`` --
    # verify names against the original transformers source.
    _UpperCAmelCase = '''char'''
    _UpperCAmelCase = '''bpe'''
    _UpperCAmelCase = '''wp'''
# Decoding strategies accepted by the processor.
_lowercase = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _lowercase ( __a ):
    """MGP-STR processor: wraps a ViT image processor plus three tokenizers
    (char/BPE/wordpiece) and fuses their per-head predictions by picking, per
    sample, the decoding with the highest cumulative confidence.

    NOTE(review): attribute/parameter names are mangled; names read in the
    bodies (inputs, encodings, char_preds, ...) follow the original source.
    """
    _UpperCAmelCase = ['''image_processor''', '''char_tokenizer''']
    _UpperCAmelCase = '''ViTImageProcessor'''
    _UpperCAmelCase = '''MgpstrTokenizer'''
    def __init__( self , A__=None , A__=None , **A__ ) -> List[Any]:
        snake_case = None
        if "feature_extractor" in kwargs:
            # Legacy kwarg kept for backward compatibility.
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , A__ , )
            snake_case = kwargs.pop('''feature_extractor''' )
        snake_case = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        snake_case = tokenizer
        # Auxiliary tokenizers for the BPE and wordpiece decoding heads.
        snake_case = AutoTokenizer.from_pretrained('''gpt2''' )
        snake_case = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(A__ , A__ )
    def __call__( self , A__=None , A__=None , A__=None , **A__ ) -> List[str]:
        # Processes images and/or text; with both, token ids are attached as
        # labels on the image features.
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            snake_case = self.image_processor(A__ , return_tensors=A__ , **A__ )
        if text is not None:
            snake_case = self.char_tokenizer(A__ , return_tensors=A__ , **A__ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            snake_case = encodings['''input_ids''']
            return inputs
    def UpperCamelCase ( self , A__ ) -> Dict:
        # Decodes (char_preds, bpe_preds, wp_preds) and keeps, per sample, the
        # head with the highest confidence score.
        snake_case , snake_case , snake_case = sequences
        snake_case = char_preds.size(0 )
        snake_case , snake_case = self._decode_helper(A__ , '''char''' )
        snake_case , snake_case = self._decode_helper(A__ , '''bpe''' )
        snake_case , snake_case = self._decode_helper(A__ , '''wp''' )
        snake_case = []
        snake_case = []
        for i in range(A__ ):
            snake_case = [char_scores[i], bpe_scores[i], wp_scores[i]]
            snake_case = [char_strs[i], bpe_strs[i], wp_strs[i]]
            snake_case = scores.index(max(A__ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        snake_case = {}
        snake_case = final_strs
        snake_case = final_scores
        snake_case = char_strs
        snake_case = bpe_strs
        snake_case = wp_strs
        return out
    def UpperCamelCase ( self , A__ , A__ ) -> Optional[Any]:
        # Shared greedy decoding: pick per-position argmax tokens, cut at the
        # head-specific end-of-sequence marker, and score by the cumulative
        # product of per-token max probabilities.
        if format == DecodeType.CHARACTER:
            snake_case = self.char_decode
            snake_case = 1  # char head eos token id
            snake_case = '''[s]'''
        elif format == DecodeType.BPE:
            snake_case = self.bpe_decode
            snake_case = 2  # bpe head eos token id
            snake_case = '''#'''
        elif format == DecodeType.WORDPIECE:
            snake_case = self.wp_decode
            snake_case = 1_02  # wordpiece head eos token id ([SEP])
            snake_case = '''[SEP]'''
        else:
            raise ValueError(F"""Format {format} is not supported.""" )
        snake_case , snake_case = [], []
        snake_case = pred_logits.size(0 )
        snake_case = pred_logits.size(1 )
        snake_case , snake_case = pred_logits.topk(1 , dim=-1 , largest=A__ , sorted=A__ )
        snake_case = preds_index.view(-1 , A__ )[:, 1:]
        snake_case = decoder(A__ )
        snake_case , snake_case = torch.nn.functional.softmax(A__ , dim=2 ).max(dim=2 )
        snake_case = preds_max_prob[:, 1:]
        for index in range(A__ ):
            snake_case = preds_str[index].find(A__ )
            snake_case = preds_str[index][:pred_eos]
            snake_case = preds_index[index].cpu().tolist()
            snake_case = pred_index.index(A__ ) if eos_token in pred_index else -1
            snake_case = preds_max_prob[index][: pred_eos_index + 1]
            # Confidence = product of token probabilities up to (and incl.) eos.
            snake_case = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(A__ )
            conf_scores.append(A__ )
        return dec_strs, conf_scores
    def UpperCamelCase ( self , A__ ) -> int:
        # Char-level decode: strip the spaces the tokenizer inserts.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(A__ )]
        return decode_strs
    def UpperCamelCase ( self , A__ ) -> List[str]:
        # BPE decode via the gpt2 tokenizer.
        return self.bpe_tokenizer.batch_decode(A__ )
    def UpperCamelCase ( self , A__ ) -> Union[str, Any]:
        # Wordpiece decode via the bert tokenizer, spaces stripped.
        snake_case = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(A__ )]
        return decode_strs
| 44
| 0
|
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger and the map of pretrained DETR checkpoints to their configs.
_lowercase = logging.get_logger(__name__)
_lowercase = {
    'facebook/detr-resnet-50': 'https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json',
    # See all DETR models at https://huggingface.co/models?filter=detr
}
class _lowercase ( __a ):
    """Configuration class for a DETR (DEtection TRansformer) model.

    Stores the transformer, backbone, Hungarian-matcher and loss
    hyper-parameters.

    NOTE(review): in the original, the class attributes and every `__init__`
    parameter shared one duplicated (illegal) identifier, and the body bound
    every value to a throwaway local so no attribute was ever set on `self`.
    Real names are reconstructed here from the defaults and from the attribute
    reads in `to_dict` and the properties — verify against callers.
    """

    model_type = '''detr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''hidden_size''': '''d_model''',
        '''num_attention_heads''': '''encoder_attention_heads''',
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ) -> None:
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
                backbone_config = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
            elif isinstance(backbone_config , dict ):
                # A plain dict was passed: instantiate the matching config class.
                backbone_model_type = backbone_config.get('''model_type''' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None — they are meaningless for a HF backbone.
            backbone, use_pretrained_backbone, dilation = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        # NOTE(review): the original bound `encoder_layers` a second time here;
        # upstream DETR stores it as `num_hidden_layers` — confirm.
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self) -> int:
        """Alias for `encoder_attention_heads` (see `attribute_map`)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias for `d_model` (see `attribute_map`)."""
        return self.d_model

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        """Alternate constructor from a backbone `PretrainedConfig` instance."""
        return cls(backbone_config=backbone_config , **kwargs )

    def to_dict(self) -> Dict[str, any]:
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class _lowercase ( __a ):
    """ONNX export configuration for DETR.

    NOTE(review): the three properties below all carry the same obfuscated
    name, so under normal Python semantics only the last definition survives
    on the class — verify against the original source.
    """

    # Minimum torch version required to export this model to ONNX.
    _UpperCAmelCase = version.parse('''1.11''' )

    @property
    def UpperCamelCase ( self ) -> Mapping[str, Mapping[int, str]]:
        """ONNX input spec: dynamic axes for the pixel values and padding mask."""
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ] )

    @property
    def UpperCamelCase ( self ) -> float:
        # Absolute tolerance used when validating ONNX outputs against PyTorch.
        return 1e-5

    @property
    def UpperCamelCase ( self ) -> int:
        # Default ONNX opset version for the export.
        return 12
| 714
|
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
_lowercase , _lowercase , _lowercase = False, False, False
@dataclass
class _lowercase :
    """Audio feature type: encodes audio samples into ``{bytes, path}`` structs
    for pyarrow storage and decodes them back into
    ``{path, array, sampling_rate}`` dicts.

    NOTE(review): locals in this file were obfuscated to the single name
    ``snake_case`` while later statements still read the original names
    (``buffer``, ``path``, ``file``, ``array``, ``value`` ...). The code is
    reproduced verbatim; only comments/docstrings were added.
    """

    # Target sampling rate (None keeps the file's native rate) — presumably;
    # field names were stripped by obfuscation, verify against the original.
    _UpperCAmelCase = None
    _UpperCAmelCase = True
    _UpperCAmelCase = True
    _UpperCAmelCase = None
    # Automatically constructed
    _UpperCAmelCase = "dict"
    _UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    _UpperCAmelCase = field(default='''Audio''' , init=__a , repr=__a )

    def __call__( self ) -> Optional[Any]:
        """Return the underlying pyarrow storage type."""
        return self.pa_type

    def UpperCamelCase ( self , A__ ) -> dict:
        """Encode a sample (path, raw bytes, or dict) into ``{bytes, path}`` storage."""
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        # NOTE(review): comparing a value against itself via isinstance is an
        # obfuscation artifact — originally these checked str / bytes.
        if isinstance(A__ , A__ ):
            return {"bytes": None, "path": value}
        elif isinstance(A__ , A__ ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            snake_case = BytesIO()
            sf.write(A__ , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    # NOTE(review): `np.intaa` / `np.floataa` / `3_27_67` look like
                    # corrupted int16 / float32 / 32767 — verify.
                    snake_case = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
                else:
                    snake_case = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_27_67
                snake_case = BytesIO(bytes() )
                sf.write(A__ , A__ , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                F"""An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )

    def UpperCamelCase ( self , A__ , A__ = None ) -> dict:
        """Decode a stored ``{bytes, path}`` sample into ``{path, array, sampling_rate}``.

        The second argument maps repo ids to auth tokens for remote files.
        """
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
        # Prefer in-memory bytes; fall back to reading from `path`.
        snake_case , snake_case = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(F"""An audio sample should have one of 'path' or 'bytes' but both are None in {value}.""" )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
        # Infer the format from the file extension to reject unsupported codecs early.
        snake_case = xsplitext(A__ )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        if file is None:
            # Remote file: resolve an auth token for its hub repo, if any.
            snake_case = token_per_repo_id or {}
            snake_case = path.split('''::''' )[-1]
            try:
                snake_case = string_to_dict(A__ , config.HUB_DATASETS_URL )['''repo_id''']
                snake_case = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                snake_case = None
            with xopen(A__ , '''rb''' , use_auth_token=A__ ) as f:
                snake_case , snake_case = sf.read(A__ )
        else:
            snake_case , snake_case = sf.read(A__ )
        # soundfile returns (frames, channels); transpose to channels-first.
        snake_case = array.T
        if self.mono:
            snake_case = librosa.to_mono(A__ )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            # Resample to the feature's configured rate.
            snake_case = librosa.resample(A__ , orig_sr=A__ , target_sr=self.sampling_rate )
            snake_case = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def UpperCamelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten this feature into raw Value columns (only valid when decode=False)."""
        from .features import Value

        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''' )
        return {
            "bytes": Value('''binary''' ),
            "path": Value('''string''' ),
        }

    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Cast arbitrary pyarrow storage (string / binary / struct) into the
        canonical ``{bytes, path}`` struct layout."""
        if pa.types.is_string(storage.type ):
            # Plain paths: pair them with null bytes.
            snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            snake_case = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            # Raw bytes: pair them with null paths.
            snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
            # Decoded samples stored as structs with an "array" field: re-encode.
            snake_case = pa.array([Audio().encode_example(A__ ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                snake_case = storage.field('''bytes''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                snake_case = storage.field('''path''' )
            else:
                snake_case = pa.array([None] * len(A__ ) , type=pa.string() )
            snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        return array_cast(A__ , self.pa_type )

    def UpperCamelCase ( self , A__ ) -> pa.StructArray:
        """Embed external file contents into the storage: read every `path`
        whose `bytes` are missing, keeping only the file's basename as path."""
        @no_op_if_value_is_null
        def path_to_bytes(A__ ):
            with xopen(A__ , '''rb''' ) as f:
                snake_case = f.read()
            return bytes_

        snake_case = pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        snake_case = pa.array(
            [os.path.basename(A__ ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        snake_case = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(A__ , self.pa_type )
| 44
| 0
|
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
# Test fixtures: a tiny hub model id, its expected cache directory, and the
# full commit hash of its main revision.
# NOTE(review): all three constants share the obfuscated name `_lowercase`,
# so only the last binding survives — verify the original names.
_lowercase = "hf-internal-testing/tiny-random-bert"
_lowercase = os.path.join(TRANSFORMERS_CACHE, 'models--hf-internal-testing--tiny-random-bert')
_lowercase = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class _lowercase ( unittest.TestCase ):
    """Tests for `cached_file`, `has_file` and `get_file_from_repo`.

    NOTE(review): the obfuscation replaced argument values, expected-exception
    classes and boolean flags with the unresolved placeholder `_UpperCamelCase`
    throughout — the original arguments must be restored before these tests
    can run; code is reproduced verbatim with comments only.
    """

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''Download a file, check the cache layout, and hit the cache on re-request.'''
        snake_case = cached_file(_UpperCamelCase , _UpperCamelCase )
        # Should have downloaded the file in here
        self.assertTrue(os.path.isdir(_UpperCamelCase ) )
        # Cache should contain at least those three subfolders:
        for subfolder in ["blobs", "refs", "snapshots"]:
            self.assertTrue(os.path.isdir(os.path.join(_UpperCamelCase , _UpperCamelCase ) ) )
        with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f:
            snake_case = f.read()
        self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) )
        self.assertTrue(os.path.isfile(_UpperCamelCase ) )
        # File is cached at the same place the second time.
        snake_case = cached_file(_UpperCamelCase , _UpperCamelCase )
        self.assertEqual(_UpperCamelCase , _UpperCamelCase )
        # Using a specific revision to test the full commit hash.
        snake_case = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''9b8c223''' )
        self.assertEqual(_UpperCamelCase , os.path.join(_UpperCamelCase , '''snapshots''' , _UpperCamelCase , _UpperCamelCase ) )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''Invalid repo ids, revisions and filenames raise descriptive errors.'''
        with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ):
            snake_case = cached_file('''tiny-random-bert''' , _UpperCamelCase )
        with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ):
            snake_case = cached_file(_UpperCamelCase , _UpperCamelCase , revision='''aaaa''' )
        with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ):
            snake_case = cached_file(_UpperCamelCase , '''conf''' )

    def UpperCamelCase ( self ) -> str:
        '''Missing files are recorded in `.no_exist`, and connection errors can be suppressed.'''
        with self.assertRaisesRegex(_UpperCamelCase , '''does not appear to have a file named''' ):
            snake_case = cached_file(_UpperCamelCase , '''conf''' )
        with open(os.path.join(_UpperCamelCase , '''refs''' , '''main''' ) ) as f:
            snake_case = f.read()
        self.assertTrue(os.path.isfile(os.path.join(_UpperCamelCase , '''.no_exist''' , _UpperCamelCase , '''conf''' ) ) )
        snake_case = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_missing_entries=_UpperCamelCase )
        self.assertIsNone(_UpperCamelCase )
        snake_case = cached_file(_UpperCamelCase , '''conf''' , local_files_only=_UpperCamelCase , _raise_exceptions_for_missing_entries=_UpperCamelCase )
        self.assertIsNone(_UpperCamelCase )
        # Build a fake response object that simulates a 500 from the hub.
        snake_case = mock.Mock()
        snake_case = 5_00
        snake_case = {}
        snake_case = HTTPError
        snake_case = {}
        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch('''requests.Session.request''' , return_value=_UpperCamelCase ) as mock_head:
            snake_case = cached_file(_UpperCamelCase , '''conf''' , _raise_exceptions_for_connection_errors=_UpperCamelCase )
            self.assertIsNone(_UpperCamelCase )
            # This check we did call the fake head request
            mock_head.assert_called()

    def UpperCamelCase ( self ) -> Any:
        '''`has_file` reports existence per weight format on a PT-only repo.'''
        self.assertTrue(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) )
        self.assertFalse(has_file('''hf-internal-testing/tiny-bert-pt-only''' , _UpperCamelCase ) )

    def UpperCamelCase ( self ) -> Union[str, Any]:
        '''`get_file_from_repo` returns None for missing files and raises on bad repos/revisions.'''
        self.assertIsNone(get_file_from_repo('''bert-base-cased''' , '''ahah.txt''' ) )
        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid model identifier''' ):
            get_file_from_repo('''bert-base-case''' , _UpperCamelCase )
        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(_UpperCamelCase , '''is not a valid git identifier''' ):
            get_file_from_repo('''bert-base-cased''' , _UpperCamelCase , revision='''ahaha''' )
        snake_case = get_file_from_repo('''bert-base-cased''' , _UpperCamelCase )
        # The name is the cached name which is not very easy to test, so instead we load the content.
        snake_case = json.loads(open(_UpperCamelCase , '''r''' ).read() )
        self.assertEqual(config['''hidden_size'''] , 7_68 )

    def UpperCamelCase ( self ) -> List[str]:
        '''`get_file_from_repo` also resolves files from a local directory.'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            snake_case = Path(_UpperCamelCase ) / """a.txt"""
            filename.touch()
            self.assertEqual(get_file_from_repo(_UpperCamelCase , '''a.txt''' ) , str(_UpperCamelCase ) )
            self.assertIsNone(get_file_from_repo(_UpperCamelCase , '''b.txt''' ) )
| 715
|
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# Optional backends: import torch / PIL only when available.
if is_torch_available():
    import torch
if is_vision_available():
    from PIL import Image
else:
    # Stand-in so references to `Image` don't fail when vision extras are
    # missing. NOTE(review): the varargs signature repeats the name `A__`
    # (a SyntaxError) — an obfuscation artifact; verify the original.
    class _lowercase :
        @staticmethod
        def UpperCamelCase ( *A__ , **A__ ) -> List[Any]:
            pass
def __UpperCamelCase ( image ) ->str:
    """Return the MD5 hex digest of an image's raw bytes (fingerprints pipeline outputs).

    Bug fixes: `hashlib.mda` does not exist (the intended call is
    `hashlib.md5`), and the body read the undefined names `image`/`m` while
    the parameter was named `a` — the names are now consistent.
    """
    digest = hashlib.md5(image.tobytes() )
    return digest.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _lowercase ( unittest.TestCase ):
    """Pipeline tests for depth estimation (output dict shape, batching, and a
    slow accuracy check against Intel/dpt-large)."""

    # Only models registered for depth estimation are exercised.
    _UpperCAmelCase = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def UpperCamelCase ( self , A__ , A__ , A__ ) -> Union[str, Any]:
        """Build the pipeline under test plus two sample image paths."""
        snake_case = DepthEstimationPipeline(model=A__ , image_processor=A__ )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def UpperCamelCase ( self , A__ , A__ ) -> List[Any]:
        """Single and batched inputs (paths, URLs, PIL images in several modes)
        all yield {'predicted_depth': Tensor, 'depth': Image} dicts."""
        snake_case = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A__ )
        import datasets

        snake_case = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        snake_case = depth_estimator(
            [
                Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
                '''http://images.cocodataset.org/val2017/000000039769.jpg''',
                # RGBA
                dataset[0]['''file'''],
                # LA
                dataset[1]['''file'''],
                # L
                dataset[2]['''file'''],
            ] )
        self.assertEqual(
            [
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
                {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , A__ , )

    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def UpperCamelCase ( self ) -> Optional[Any]:
        pass

    @slow
    @require_torch
    def UpperCamelCase ( self ) -> Dict:
        """Accuracy check of predicted depth extremes against known values."""
        snake_case = '''Intel/dpt-large'''
        snake_case = pipeline('''depth-estimation''' , model=A__ )
        snake_case = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        snake_case = hashimage(outputs['''depth'''] )
        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 2_9.3_0_4 )
        self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.6_6_2 )

    @require_torch
    def UpperCamelCase ( self ) -> Any:
        # This is highly irregular to have no small tests.
        self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 44
| 0
|
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
# Script logger and default inference device ('cuda' when available).
# NOTE(review): both constants share the obfuscated name `_lowercase` —
# the logger binding is clobbered; verify the original names.
_lowercase = getLogger(__name__)
_lowercase = 'cuda' if torch.cuda.is_available() else 'cpu'
def __UpperCamelCase ( a : int , a : Tuple , a : int , a : Any = 8 , a : List[Any] = DEFAULT_DEVICE , a : Any=False , a : Optional[int]="summarization" , a : Union[str, Any]=None , **a : List[str] , ) ->Optional[int]:
    """Run a seq2seq model over examples in chunks and write generations to a file.

    Returns timing stats {'n_obs', 'runtime', 'seconds_per_sample'}.
    NOTE(review): the obfuscation collapsed every parameter to the duplicated
    name `a` (a SyntaxError) and every local to `snake_case`, while later
    statements read the original names (`fout`, `model`, `tokenizer`, ...).
    `fpaa` looks like a corrupted `fp16` flag — verify. Code is verbatim.
    """
    # Output file for the generated hypotheses, one per line.
    snake_case = Path(_lowerCamelCase ).open('''w''' , encoding='''utf-8''' )
    snake_case = str(_lowerCamelCase )
    snake_case = AutoModelForSeqaSeqLM.from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
    if fpaa:
        # Half precision inference.
        snake_case = model.half()
    snake_case = AutoTokenizer.from_pretrained(_lowerCamelCase )
    logger.info(f"""Inferred tokenizer type: {tokenizer.__class__}""" )  # if this is wrong, check config.model_type.
    snake_case = time.time()
    # update config with task specific params
    use_task_specific_params(_lowerCamelCase , _lowerCamelCase )
    if prefix is None:
        snake_case = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''
    for examples_chunk in tqdm(list(chunks(_lowerCamelCase , _lowerCamelCase ) ) ):
        snake_case = [prefix + text for text in examples_chunk]
        snake_case = tokenizer(_lowerCamelCase , return_tensors='''pt''' , truncation=_lowerCamelCase , padding='''longest''' ).to(_lowerCamelCase )
        snake_case = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **_lowerCamelCase , )
        snake_case = tokenizer.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase , clean_up_tokenization_spaces=_lowerCamelCase )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    snake_case = int(time.time() - start_time )  # seconds
    snake_case = len(_lowerCamelCase )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def __UpperCamelCase ( ) ->int:
    """Return the current local time formatted as ``YYYY-MM-DD HH:MM:SS``.

    (The ``-> int`` annotation is preserved from the original even though a
    string is returned.)
    """
    timestamp = datetime.datetime.now()
    return timestamp.strftime('''%Y-%m-%d %H:%M:%S''' )
def __UpperCamelCase ( a : List[Any]=True ) ->List[Any]:
    """CLI entry point: parse args, run generation, optionally score with
    BLEU/ROUGE, and dump metrics to `--score_path`.

    NOTE(review): obfuscation artifacts throughout — locals were collapsed to
    `snake_case` while later lines read the originals (`args`, `examples`,
    `scores`, ...); `args.fpaa` looks like a corrupted `args.fp16`; the single
    `snake_case = parser.parse_known_args()` originally unpacked two values.
    Code is reproduced verbatim with comments only.
    """
    snake_case = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=_lowerCamelCase , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=_lowerCamelCase , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=_lowerCamelCase , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=_lowerCamelCase , required=_lowerCamelCase , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=_lowerCamelCase , required=_lowerCamelCase , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=_lowerCamelCase , required=_lowerCamelCase , default=_lowerCamelCase , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=_lowerCamelCase , required=_lowerCamelCase , default=_lowerCamelCase , help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--task''' , type=_lowerCamelCase , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=_lowerCamelCase , default=8 , required=_lowerCamelCase , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=_lowerCamelCase , default=-1 , required=_lowerCamelCase , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=_lowerCamelCase , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    snake_case = parser.parse_known_args()
    snake_case = parse_numeric_n_bool_cl_kwargs(_lowerCamelCase )
    if parsed_args and verbose:
        print(f"""parsed the following generate kwargs: {parsed_args}""" )
    # T5-style models expect a leading space on the source text.
    snake_case = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        snake_case = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=_lowerCamelCase )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(f"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    snake_case = generate_summaries_or_translations(
        _lowerCamelCase , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **_lowerCamelCase , )
    if args.reference_path is None:
        return {}
    # Compute scores
    snake_case = calculate_bleu if 'translation' in args.task else calculate_rouge
    snake_case = [x.rstrip() for x in open(args.save_path ).readlines()]
    snake_case = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(_lowerCamelCase )]
    snake_case = score_fn(_lowerCamelCase , _lowerCamelCase )
    scores.update(_lowerCamelCase )
    if args.dump_args:
        scores.update(_lowerCamelCase )
    if args.info:
        snake_case = args.info
    if verbose:
        print(_lowerCamelCase )
    if args.score_path is not None:
        json.dump(_lowerCamelCase , open(args.score_path , '''w''' ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): `run_generate` is the original (pre-obfuscation) name of
    # the CLI function defined above as `__UpperCamelCase` — verify.
    run_generate(verbose=True)
| 716
|
'''simple docstring'''
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def __UpperCamelCase ( state_dict ) -> None:
    """Drop fairseq-only keys from a checkpoint state dict, in place.

    Bug fixes: the original's parameter was named `a` while the body read the
    undefined name `state_dict`, and it popped the dict itself as the key
    (`state_dict.pop(a, a)`) instead of each ignore key. Missing keys are
    tolerated via a `None` default.
    """
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''decoder.output_projection.weight''',
        '''_float_tensor''',
        '''encoder.embed_positions._float_tensor''',
        '''decoder.embed_positions._float_tensor''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def __UpperCamelCase ( s_dict ) -> None:
    """Rename fairseq parameter keys to the transformers layout, in place.

    Bug fix: the original popped each matching key but discarded the value
    into a throwaway local, destroying weights instead of renaming them.
    NOTE(review): the replacement targets ('transformer_layers' -> 'layers',
    'subsample' -> 'conv') are reconstructed from the upstream
    speech_to_text converter — verify.
    """
    keys = list(s_dict.keys() )
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace('''transformer_layers''' , '''layers''' )] = s_dict.pop(key )
        elif "subsample" in key:
            s_dict[key.replace('''subsample''' , '''conv''' )] = s_dict.pop(key )
def __UpperCamelCase ( emb ):
    """Build a bias-free ``nn.Linear`` layer sharing the weights of an Embedding.

    Used to tie the LM head to the decoder token embeddings.
    Bug fix: the original's parameter was named `a` while the body read the
    undefined names `emb` and `lin_layer`.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    # Share (not copy) the embedding weights with the projection.
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def __UpperCamelCase ( a : Any , a : Tuple ) ->Tuple:
    """Convert a fairseq Speech2Text checkpoint into a transformers model and
    save it to the given output folder.

    NOTE(review): obfuscation collapsed both parameters to the duplicated name
    `a` (a SyntaxError) and all locals to `snake_case`, while later lines read
    the originals (`args`, `state_dict`, `model`, `missing`, ...). Code is
    reproduced verbatim with comments only.
    """
    # Load the raw fairseq checkpoint on CPU.
    snake_case = torch.load(a , map_location='''cpu''' )
    snake_case = mam_aaa['''args''']
    snake_case = mam_aaa['''model''']
    snake_case = state_dict['''decoder.output_projection.weight''']
    remove_ignore_keys_(a )
    rename_keys(a )
    snake_case = state_dict['''decoder.embed_tokens.weight'''].shape[0]
    snake_case = args.share_decoder_input_output_embed
    snake_case = [int(a ) for i in args.conv_kernel_sizes.split(''',''' )]
    # Mirror every fairseq hyper-parameter into the transformers config.
    snake_case = SpeechaTextConfig(
        vocab_size=a , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(a ) , conv_channels=args.conv_channels , conv_kernel_sizes=a , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=a , num_beams=5 , max_length=200 , use_cache=a , decoder_start_token_id=2 , early_stopping=a , )
    snake_case = SpeechaTextForConditionalGeneration(a )
    snake_case , snake_case = model.model.load_state_dict(a , strict=a )
    # Only positional-embedding buffers are allowed to be missing.
    if len(a ) > 0 and not set(a ) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,'''
            f""" but all the following weights are missing {missing}""" )
    if tie_embeds:
        # Tie the LM head to the decoder embeddings.
        snake_case = make_linear_from_emb(model.model.decoder.embed_tokens )
    else:
        snake_case = lm_head_weights
    model.save_pretrained(a )
# CLI wrapper: convert a fairseq .pt checkpoint to a transformers folder.
# NOTE(review): `convert_fairseq_sat_checkpoint_to_tfms` is the original name
# of the function obfuscated above as `__UpperCamelCase` — verify.
if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    _lowercase = parser.parse_args()
    convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 44
| 0
|
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
# CLI script: convert an original Stable Diffusion .ckpt/.safetensors checkpoint
# into a diffusers pipeline (or a bare ControlNet) and save it to --dump_path.
if __name__ == "__main__":
    _lowercase = argparse.ArgumentParser()
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
    )
    # !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
    parser.add_argument(
        '--original_config_file',
        default=None,
        type=str,
        help='The YAML config file corresponding to the original architecture.',
    )
    parser.add_argument(
        '--num_in_channels',
        default=None,
        type=int,
        help='The number of input channels. If `None` number of input channels will be automatically inferred.',
    )
    parser.add_argument(
        '--scheduler_type',
        default='pndm',
        type=str,
        help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
    )
    parser.add_argument(
        '--pipeline_type',
        default=None,
        type=str,
        help=(
            'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
            '. If `None` pipeline will be automatically inferred.'
        ),
    )
    parser.add_argument(
        '--image_size',
        default=None,
        type=int,
        help=(
            'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
            ' Base. Use 768 for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--prediction_type',
        default=None,
        type=str,
        help=(
            'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
            ' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
        ),
    )
    parser.add_argument(
        '--extract_ema',
        action='store_true',
        help=(
            'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
            ' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
            ' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
        ),
    )
    parser.add_argument(
        '--upcast_attention',
        action='store_true',
        help=(
            'Whether the attention computation should always be upcasted. This is necessary when running stable'
            ' diffusion 2.1.'
        ),
    )
    parser.add_argument(
        '--from_safetensors',
        action='store_true',
        help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
    )
    parser.add_argument(
        '--to_safetensors',
        action='store_true',
        help='Whether to store pipeline in safetensors format or not.',
    )
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    parser.add_argument(
        '--stable_unclip',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
    )
    parser.add_argument(
        '--stable_unclip_prior',
        type=str,
        default=None,
        required=False,
        help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
    )
    parser.add_argument(
        '--clip_stats_path',
        type=str,
        help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
        required=False,
    )
    parser.add_argument(
        '--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
    )
    parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
    parser.add_argument(
        '--vae_path',
        type=str,
        default=None,
        required=False,
        help='Set to a path, hub id to an already converted vae to not convert it again.',
    )
    _lowercase = parser.parse_args()
    # Run the conversion; every CLI flag maps 1:1 onto a converter kwarg.
    _lowercase = download_from_original_stable_diffusion_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        prediction_type=args.prediction_type,
        model_type=args.pipeline_type,
        extract_ema=args.extract_ema,
        scheduler_type=args.scheduler_type,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        stable_unclip=args.stable_unclip,
        stable_unclip_prior=args.stable_unclip_prior,
        clip_stats_path=args.clip_stats_path,
        controlnet=args.controlnet,
        vae_path=args.vae_path,
    )
    if args.half:
        # NOTE(review): `torch.floataa` looks like a corrupted `torch.float16`
        # (the flag is --half) — verify against the original script.
        pipe.to(torch_dtype=torch.floataa)
    if args.controlnet:
        # only save the controlnet model
        pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
    else:
        pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 717
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowercase(metaclass=DummyObject):
    """Import-guard placeholder for a pipeline requiring optional backends.

    Constructing it, or calling its classmethod constructors, raises a
    helpful ImportError via ``requires_backends`` explaining that the
    ``transformers``, ``torch`` and ``note_seq`` packages must be installed.
    """

    # Backends the real object depends on.
    _UpperCAmelCase = ['''transformers''', '''torch''', '''note_seq''']

    def __init__(self, *args, **kwargs):
        requires_backends(self, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ['''transformers''', '''torch''', '''note_seq'''])
| 44
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
_lowercase = logging.get_logger(__name__)
class _lowercase(GLPNImageProcessor):
    """Deprecated alias of ``GLPNImageProcessor`` kept for backward compatibility."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a deprecation warning, then delegate everything to GLPNImageProcessor.
        warnings.warn(
            '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use GLPNImageProcessor instead.''',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 718
|
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = value
snake_case = None
snake_case = None
class _lowercase :
def __init__( self , A__ ) -> None:
snake_case = tree
def UpperCamelCase ( self , A__ ) -> int:
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self ) -> Iterator[int]:
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
    # When run as a script, execute the module's doctests.
    import doctest

    doctest.testmod()
| 44
| 0
|
'''simple docstring'''
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    """Elementwise Gaussian of ``img`` for the given variance.

    Applies the 1-D normal pdf to every element of ``img`` (vectorised via
    numpy), used for both the spatial and the intensity weights.
    """
    sigma = math.sqrt(variance)
    # Normalising constant of a 1-D Gaussian pdf.
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)
def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    """Return the ``kernel_size`` x ``kernel_size`` window of ``img`` centred at (x, y)."""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    """Build the spatial Gaussian weight kernel.

    Each cell holds the Euclidean distance of (i, j) from the kernel centre;
    the distances are then passed through ``vec_gaussian``.
    """
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)
def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    """Apply a bilateral filter to ``img``.

    Weights combine a fixed spatial Gaussian kernel with a per-window
    intensity Gaussian; the ``kernel_size // 2`` border is left zero.
    """
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            # Intensity differences relative to the window's centre pixel.
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            img2[i, j] = np.sum(vals) / np.sum(weights)
    return img2
def parse_args(args: list) -> tuple:
    """Parse CLI args into (filename, spatial_variance, intensity_variance, kernel_size).

    Missing arguments fall back to defaults; the kernel size is forced to be
    odd so the filter window has a well-defined centre pixel.
    """
    filename = args[1] if args[1:] else '''../image_data/lena.jpg'''
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        # Bump even sizes up by one to make them odd.
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
    # Read, filter and display the image; note np.uint8 (was the typo np.uinta).
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cva.imread(filename, 0)
    cva.imshow('input image', img)

    # Work in float [0, 1], filter, then rescale back to 8-bit for display.
    out = img / 255
    out = out.astype('float32')
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cva.imshow('output image', out)
    cva.waitKey(0)
    cva.destroyAllWindows()
| 719
|
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
# Key-prefix substitutions applied when converting the original VisualBERT
# state dict to the Hugging Face layout (used as the default of get_new_dict).
rename_keys_prefix = [
    ('bert.bert', 'visual_bert'),
    ('bert.cls', 'cls'),
    ('bert.classifier', 'cls'),
    ('token_type_embeddings_visual', 'visual_token_type_embeddings'),
    ('position_embeddings_visual', 'visual_position_embeddings'),
    ('projection', 'visual_projection'),
]

# Checkpoint filenames this conversion script knows how to handle.
ACCEPTABLE_CHECKPOINTS = [
    'nlvr2_coco_pre_trained.th',
    'nlvr2_fine_tuned.th',
    'nlvr2_pre_trained.th',
    'vcr_coco_pre_train.th',
    'vcr_fine_tune.th',
    'vcr_pre_train.th',
    'vqa_coco_pre_trained.th',
    'vqa_fine_tuned.th',
    'vqa_pre_trained.th',
]
def load_state_dict(checkpoint_path):
    """Load a torch state dict from ``checkpoint_path``, mapped onto the CPU."""
    sd = torch.load(checkpoint_path, map_location='''cpu''')
    return sd
def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    """Rename keys of the original state dict ``d`` to the HF VisualBERT layout.

    Detector weights are dropped, the configured prefix substitutions are
    applied, and position ids / the decoder bias are synthesised.
    """
    new_d = OrderedDict()
    # Position ids are not stored in the original checkpoint; recreate them.
    new_d['''visual_bert.embeddings.position_ids'''] = torch.arange(config.max_position_embeddings).expand((1, -1))
    for key in d:
        if "detector" in key:
            # Detector weights are not part of the HF model.
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['''cls.predictions.decoder.bias'''] = new_d['''cls.predictions.bias''']
    return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    """Convert an original VisualBERT checkpoint to the HF format and save it.

    The model class and config are chosen from the checkpoint filename
    (pretraining / vqa / nlvr / multichoice variants).
    """
    assert (
        checkpoint_path.split('''/''')[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"""The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."""

    # Get Config
    if "pre" in checkpoint_path:
        model_type = '''pretraining'''
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
        elif "nlvr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 1024}
        else:
            raise NotImplementedError(f"""No implementation found for `{checkpoint_path}`.""")
    else:
        if "vcr" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 512}
            model_type = '''multichoice'''
        elif "vqa_advanced" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048}
            model_type = '''vqa_advanced'''
        elif "vqa" in checkpoint_path:
            config_params = {'''visual_embedding_dim''': 2048, '''num_labels''': 3129}
            model_type = '''vqa'''
        elif "nlvr" in checkpoint_path:
            config_params = {
                '''visual_embedding_dim''': 1024,
                '''num_labels''': 2,
            }
            model_type = '''nlvr'''

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)
    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
    parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""kssteven/ibert-roberta-base""": """https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json""",
"""kssteven/ibert-roberta-large""": """https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json""",
"""kssteven/ibert-roberta-large-mnli""": (
"""https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"""
),
}
class _lowercase(PretrainedConfig):
    """Configuration for I-BERT (integer-only BERT) models.

    Mirrors the RoBERTa configuration and adds quantization controls:
    ``quant_mode`` toggles integer-only inference and ``force_dequant``
    selectively disables quantization of particular ops.
    """

    model_type = '''ibert'''

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='''gelu''',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='''absolute''',
        quant_mode=False,
        force_dequant='''none''',
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
class _lowercase(OnnxConfig):
    """ONNX export configuration for I-BERT models."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis mapping for the exported model's inputs.

        Multiple-choice inputs carry an extra `choice` axis between the batch
        and sequence dimensions.
        """
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
            ]
        )
| 720
|
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def replace_key_with_offset(key, offset, original_name, new_name):
    """Rename ``original_name`` inside ``key`` to ``new_name`` and shift the
    block number down by ``offset``.

    PoolFormer checkpoints count patch embeddings inside the block index,
    the HF layout does not, hence the offset.
    """
    to_find = original_name.split('''.''')[0]
    key_list = key.split('''.''')
    # Block and layer numbers immediately precede the sub-module name.
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(
        f"""{orig_block_num}.{layer_num}.{original_name}""",
        f"""block.{new_block_num}.{layer_num}.{new_name}""",
    )
    return key
def rename_keys(state_dict):
    """Map keys from the original PoolFormer state dict to the HF layout."""
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith('''network'''):
            key = key.replace('''network''', '''poolformer.encoder''')
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith('''bias''') and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find('''proj''')]
            key = key.replace(to_replace, f"""patch_embeddings.{total_embed_found}.""")
            key = key.replace('''proj''', '''projection''')
            if key.endswith('''bias'''):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = '''poolformer.encoder.''' + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc1''', '''output.conv1''')
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''mlp.fc2''', '''output.conv2''')
        if "norm1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm1''', '''before_norm''')
        if "norm2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''norm2''', '''after_norm''')
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_1''', '''layer_scale_1''')
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key, patch_emb_offset, '''layer_scale_2''', '''layer_scale_2''')
        if "head" in key:
            key = key.replace('''head''', '''classifier''')
        new_state_dict[key] = value
    return new_state_dict
def prepare_img():
    """Download the standard COCO cats test image used to sanity-check the conversion."""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    """Convert an original PoolFormer checkpoint to the HF format and save it.

    Depths/hidden sizes and the expected logits are selected from the size
    suffix of ``model_name`` (s12/s24/s36/m36/m48); the converted model's
    logits are verified against known slices before saving.
    """
    config = PoolFormerConfig()

    # set attributes based on model_name
    repo_id = '''huggingface/label-files'''
    size = model_name[-3:]
    config.num_labels = 1000
    filename = '''imagenet-1k-id2label.json'''
    expected_shape = (1, 1000)

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset'''), '''r'''))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(f"""Size {size} not supported""")

    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)

    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='''pt''').pixel_values

    logger.info(f"""Converting model {model_name}...""")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('''cpu'''))

    # rename keys
    state_dict = rename_keys(state_dict)

    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits

    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(f"""Size {size} not supported""")

    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3], expected_slice, atol=1e-2)

    # finally, save model and image processor
    logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"""Saving image processor to {pytorch_dump_folder_path}""")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_name',
        default='poolformer_s12',
        type=str,
        help='Name of the model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
    )
    args = parser.parse_args()
    convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 44
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.